peg-this 3.0.1__py3-none-any.whl → 4.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- peg_this/features/__init__.py +0 -0
- peg_this/features/audio.py +38 -0
- peg_this/features/batch.py +125 -0
- peg_this/features/convert.py +225 -0
- peg_this/features/crop.py +178 -0
- peg_this/features/inspect.py +60 -0
- peg_this/features/join.py +83 -0
- peg_this/features/trim.py +26 -0
- peg_this/peg_this.py +52 -620
- peg_this/utils/__init__.py +0 -0
- peg_this/utils/ffmpeg_utils.py +129 -0
- peg_this/utils/ui_utils.py +52 -0
- peg_this-4.0.0.dist-info/METADATA +164 -0
- peg_this-4.0.0.dist-info/RECORD +19 -0
- peg_this-3.0.1.dist-info/METADATA +0 -87
- peg_this-3.0.1.dist-info/RECORD +0 -8
- {peg_this-3.0.1.dist-info → peg_this-4.0.0.dist-info}/WHEEL +0 -0
- {peg_this-3.0.1.dist-info → peg_this-4.0.0.dist-info}/entry_points.txt +0 -0
- {peg_this-3.0.1.dist-info → peg_this-4.0.0.dist-info}/licenses/LICENSE +0 -0
- {peg_this-3.0.1.dist-info → peg_this-4.0.0.dist-info}/top_level.txt +0 -0
|
File without changes
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
4
|
+
import ffmpeg
|
|
5
|
+
import questionary
|
|
6
|
+
from rich.console import Console
|
|
7
|
+
|
|
8
|
+
from peg_this.utils.ffmpeg_utils import run_command, has_audio_stream
|
|
9
|
+
|
|
10
|
+
console = Console()
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def extract_audio(file_path):
    """Extract the audio track from a video file.

    Prompts for an output format (mp3/flac/wav), writes
    ``<stem>_audio.<fmt>`` next to the working directory, and waits for a
    keypress before returning. Returns early if the source has no audio.
    """
    if not has_audio_stream(file_path):
        console.print("[bold red]Error: No audio stream found in the file.[/bold red]")
        questionary.press_any_key_to_continue().ask()
        return

    audio_format = questionary.select("Select audio format:", choices=["mp3", "flac", "wav"], use_indicator=True).ask()
    if not audio_format: return

    # Map the chosen container to a valid ffmpeg encoder name.
    # BUG FIX: ffmpeg has no encoder called "wav" — WAV containers carry
    # PCM audio, so request pcm_s16le explicitly. "flac" is a real encoder.
    encoders = {'mp3': 'libmp3lame', 'flac': 'flac', 'wav': 'pcm_s16le'}

    output_file = f"{Path(file_path).stem}_audio.{audio_format}"
    # vn=None drops any video stream; y=None overwrites without prompting.
    stream = ffmpeg.input(file_path).output(output_file, vn=None, acodec=encoders[audio_format], y=None)

    run_command(stream, f"Extracting audio to {audio_format.upper()}...", show_progress=True)
    console.print(f"[bold green]Successfully extracted audio to {output_file}[/bold green]")
    questionary.press_any_key_to_continue().ask()
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def remove_audio(file_path):
    """Write a copy of the video with its audio stream stripped out."""
    source = Path(file_path)
    output_file = f"{source.stem}_no_audio{source.suffix}"
    # an=None drops the audio; the video stream is copied untouched
    # (vcodec='copy' means no re-encode), y=None overwrites silently.
    silent = ffmpeg.input(file_path).output(output_file, vcodec='copy', an=None, y=None)

    run_command(silent, "Removing audio track...", show_progress=True)
    console.print(f"[bold green]Successfully removed audio, saved to {output_file}[/bold green]")
    questionary.press_any_key_to_continue().ask()
|
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
|
|
2
|
+
import os
|
|
3
|
+
import logging
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
import ffmpeg
|
|
7
|
+
import questionary
|
|
8
|
+
from rich.console import Console
|
|
9
|
+
|
|
10
|
+
from peg_this.utils.ffmpeg_utils import run_command, has_audio_stream
|
|
11
|
+
from peg_this.utils.ui_utils import get_media_files
|
|
12
|
+
|
|
13
|
+
console = Console()
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def batch_convert():
    """Convert every media file in the current directory to a chosen format.

    Prompts for a target format (video container, audio format or GIF), an
    optional video quality preset, and a confirmation; then converts each
    file found by get_media_files(), printing per-file results and a final
    success/failure summary.
    """
    media_files = get_media_files()
    if not media_files:
        console.print("[bold yellow]No media files found in the current directory.[/bold yellow]")
        questionary.press_any_key_to_continue().ask()
        return

    output_format = questionary.select(
        "Select output format for the batch conversion:",
        choices=["mp4", "mkv", "mov", "avi", "webm", "mp3", "flac", "wav", "gif"],
        use_indicator=True
    ).ask()
    if not output_format: return

    quality_preset = None
    if output_format in ["mp4", "mkv", "mov", "avi", "webm"]:
        quality_preset = questionary.select(
            "Select quality preset:",
            choices=["Same as source", "High (CRF 18)", "Medium (CRF 23)", "Low (CRF 28)"],
            use_indicator=True
        ).ask()
        if not quality_preset: return

    confirm = questionary.confirm(
        f"This will convert {len(media_files)} file(s) in the current directory to .{output_format}. Continue?",
        default=False
    ).ask()

    if not confirm:
        console.print("[bold yellow]Batch conversion cancelled.[/bold yellow]")
        return

    # Map audio container choice to a valid ffmpeg encoder name.
    # BUG FIX: ffmpeg has no encoder called "wav" — WAV files carry PCM
    # audio, so pcm_s16le must be requested explicitly.
    audio_encoders = {'mp3': 'libmp3lame', 'flac': 'flac', 'wav': 'pcm_s16le'}

    success_count = 0
    fail_count = 0

    for file in media_files:
        console.rule(f"Processing: {file}")
        file_path = os.path.abspath(file)
        is_gif = Path(file_path).suffix.lower() == '.gif'
        has_audio = has_audio_stream(file_path)

        # Audio extraction from a silent source (or a GIF) is impossible.
        if (is_gif or not has_audio) and output_format in ["mp3", "flac", "wav"]:
            console.print(f"[bold yellow]Skipping {file}: Source has no audio to convert.[/bold yellow]")
            continue

        output_file = f"{Path(file_path).stem}_batch.{output_format}"
        input_stream = ffmpeg.input(file_path)
        output_stream = None
        kwargs = {'y': None}  # -y: overwrite existing output without asking
        palette_file = f"palette_{Path(file_path).stem}.png"

        try:
            if output_format in ["mp4", "mkv", "mov", "avi", "webm"]:
                if quality_preset == "Same as source":
                    kwargs['c'] = 'copy'  # remux only, no re-encode
                else:
                    # e.g. "High (CRF 18)" -> "18"
                    crf = quality_preset.split(" ")[-1][1:-1]
                    kwargs['c:v'] = 'libx264'
                    kwargs['crf'] = crf
                    kwargs['pix_fmt'] = 'yuv420p'  # widest player compatibility
                    if has_audio:
                        kwargs['c:a'] = 'aac'
                        kwargs['b:a'] = '192k'
                    else:
                        kwargs['an'] = None
                output_stream = input_stream.output(output_file, **kwargs)

            elif output_format in ["mp3", "flac", "wav"]:
                kwargs['vn'] = None  # drop any video stream
                kwargs['c:a'] = audio_encoders[output_format]
                if output_format == 'mp3':
                    kwargs['b:a'] = '192k' # Default bitrate for batch
                output_stream = input_stream.output(output_file, **kwargs)

            elif output_format == "gif":
                fps = "15"
                scale = "480"

                # Two-pass GIF: build an optimal 256-colour palette first,
                # then re-run the same filter chain applying that palette.
                palette_gen_stream = input_stream.video.filter('fps', fps=fps).filter('scale', w=scale, h=-1, flags='lanczos').filter('palettegen')
                run_command(palette_gen_stream.output(palette_file, y=None), f"Generating palette for {file}...")

                if not os.path.exists(palette_file):
                    console.print(f"[bold red]Failed to generate color palette for {file}.[/bold red]")
                    fail_count += 1
                    continue

                palette_input = ffmpeg.input(palette_file)
                video_stream = input_stream.video.filter('fps', fps=fps).filter('scale', w=scale, h=-1, flags='lanczos')
                final_stream = ffmpeg.filter([video_stream, palette_input], 'paletteuse')
                output_stream = final_stream.output(output_file, y=None)

            if output_stream and run_command(output_stream, f"Converting {file}...", show_progress=True):
                console.print(f" -> [bold green]Successfully converted to {output_file}[/bold green]")
                success_count += 1
            else:
                console.print(f" -> [bold red]Failed to convert {file}.[/bold red]")
                fail_count += 1

        except Exception as e:
            console.print(f"[bold red]An unexpected error occurred while processing {file}: {e}[/bold red]")
            logging.error(f"Batch convert error for file {file}: {e}")
            fail_count += 1
        finally:
            # BUG FIX: clean up the temporary palette even when conversion
            # raised (previously the cleanup only ran on the success path).
            if output_format == "gif" and os.path.exists(palette_file):
                os.remove(palette_file)

    console.rule("[bold green]Batch Conversion Complete[/bold green]")
    console.print(f"Successful: {success_count} | Failed: {fail_count}")
    questionary.press_any_key_to_continue().ask()
|
|
@@ -0,0 +1,225 @@
|
|
|
1
|
+
|
|
2
|
+
import os
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
import ffmpeg
|
|
6
|
+
import questionary
|
|
7
|
+
from rich.console import Console
|
|
8
|
+
|
|
9
|
+
from peg_this.utils.ffmpeg_utils import run_command, has_audio_stream
|
|
10
|
+
|
|
11
|
+
console = Console()
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def convert_file(file_path):
    """Convert the file to a different format.

    Prompts for a target container (video, audio or GIF) plus any
    format-specific options (quality preset, bitrate, GIF fps/width),
    then runs the conversion with ffmpeg and waits for a keypress.
    """
    is_gif = Path(file_path).suffix.lower() == '.gif'
    has_audio = has_audio_stream(file_path)

    output_format = questionary.select("Select the output format:", choices=["mp4", "mkv", "mov", "avi", "webm", "mp3", "flac", "wav", "gif"], use_indicator=True).ask()
    if not output_format: return

    # Audio extraction from a silent source (or a GIF) is impossible.
    if (is_gif or not has_audio) and output_format in ["mp3", "flac", "wav"]:
        console.print("[bold red]Error: Source has no audio to convert.[/bold red]")
        questionary.press_any_key_to_continue().ask()
        return

    output_file = f"{Path(file_path).stem}_converted.{output_format}"

    input_stream = ffmpeg.input(file_path)
    output_stream = None
    kwargs = {'y': None}  # -y: overwrite existing output without asking

    if output_format in ["mp4", "mkv", "mov", "avi", "webm"]:
        quality = questionary.select("Select quality preset:", choices=["Same as source", "High (CRF 18)", "Medium (CRF 23)", "Low (CRF 28)"], use_indicator=True).ask()
        if not quality: return

        if quality == "Same as source":
            kwargs['c'] = 'copy'  # remux only, no re-encode
        else:
            crf = quality.split(" ")[-1][1:-1]  # e.g. "High (CRF 18)" -> "18"
            kwargs['c:v'] = 'libx264'
            kwargs['crf'] = crf
            kwargs['pix_fmt'] = 'yuv420p'  # widest player compatibility
            if has_audio:
                kwargs['c:a'] = 'aac'
                kwargs['b:a'] = '192k'
            else:
                kwargs['an'] = None
        output_stream = input_stream.output(output_file, **kwargs)

    elif output_format in ["mp3", "flac", "wav"]:
        kwargs['vn'] = None  # drop any video stream
        if output_format == 'mp3':
            bitrate = questionary.select("Select audio bitrate:", choices=["128k", "192k", "256k", "320k"]).ask()
            if not bitrate: return
            kwargs['c:a'] = 'libmp3lame'
            kwargs['b:a'] = bitrate
        else:
            # BUG FIX: ffmpeg has no encoder named "wav"; WAV containers
            # carry PCM audio, so request pcm_s16le. "flac" is a real encoder.
            kwargs['c:a'] = 'pcm_s16le' if output_format == 'wav' else output_format
        output_stream = input_stream.output(output_file, **kwargs)

    elif output_format == "gif":
        fps = questionary.text("Enter frame rate (e.g., 15):", default="15").ask()
        if not fps: return
        scale = questionary.text("Enter width in pixels (e.g., 480):", default="480").ask()
        if not scale: return

        palette_file = f"palette_{Path(file_path).stem}.png"

        # Two-pass GIF: build an optimal palette first, then re-run the
        # same fps/scale chain applying that palette.
        palette_gen_stream = input_stream.video.filter('fps', fps=fps).filter('scale', w=scale, h=-1, flags='lanczos').filter('palettegen')
        run_command(palette_gen_stream.output(palette_file, y=None), "Generating color palette...")

        if not os.path.exists(palette_file):
            console.print("[bold red]Failed to generate color palette for GIF.[/bold red]")
            questionary.press_any_key_to_continue().ask()
            return

        palette_input = ffmpeg.input(palette_file)
        video_stream = input_stream.video.filter('fps', fps=fps).filter('scale', w=scale, h=-1, flags='lanczos')

        final_stream = ffmpeg.filter([video_stream, palette_input], 'paletteuse')
        output_stream = final_stream.output(output_file, y=None)

    if output_stream and run_command(output_stream, f"Converting to {output_format}...", show_progress=True):
        console.print(f"[bold green]Successfully converted to {output_file}[/bold green]")
    else:
        console.print("[bold red]Conversion failed.[/bold red]")

    # Remove the temporary GIF palette if one was created.
    if output_format == "gif" and os.path.exists(f"palette_{Path(file_path).stem}.png"):
        os.remove(f"palette_{Path(file_path).stem}.png")

    questionary.press_any_key_to_continue().ask()
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def convert_image(file_path):
    """Convert an image to another format, with a quality prompt for lossy targets."""
    output_format = questionary.select(
        "Select the output format:",
        choices=["jpg", "png", "webp", "bmp", "tiff"],
        use_indicator=True
    ).ask()
    if not output_format:
        return

    output_file = f"{Path(file_path).stem}_converted.{output_format}"
    ff_kwargs = {'y': None}

    # Only the lossy targets (JPG/WEBP) take a quality setting.
    if output_format in ['jpg', 'webp']:
        quality_preset = questionary.select(
            "Select quality preset:",
            choices=["High (95%)", "Medium (80%)", "Low (60%)"],
            use_indicator=True
        ).ask()
        if not quality_preset:
            return

        preset_percentages = {"High (95%)": "95", "Medium (80%)": "80", "Low (60%)": "60"}
        quality = preset_percentages[quality_preset]

        if output_format == 'jpg':
            # JPEG uses ffmpeg's 2..31 q:v scale (lower = better quality);
            # map the percentage onto that range.
            ff_kwargs['q:v'] = int(31 - (int(quality) / 100.0) * 30)
        else:
            ff_kwargs['quality'] = quality

    conversion = ffmpeg.input(file_path).output(output_file, **ff_kwargs)

    if run_command(conversion, f"Converting to {output_format.upper()}..."):
        console.print(f"[bold green]Successfully converted image to {output_file}[/bold green]")
    else:
        console.print("[bold red]Image conversion failed.[/bold red]")

    questionary.press_any_key_to_continue().ask()
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def resize_image(file_path):
    """Resize an image; pass -1 for one dimension to keep the aspect ratio."""
    console.print("Enter new dimensions. Use [bold]-1[/bold] for one dimension to preserve aspect ratio.")
    target_w = questionary.text("Enter new width (e.g., 1280 or -1):").ask()
    if not target_w:
        return
    target_h = questionary.text("Enter new height (e.g., 720 or -1):").ask()
    if not target_h:
        return

    # Both values must parse as integers, and at least one must be a
    # concrete size — ffmpeg cannot infer both dimensions.
    try:
        both_auto = int(target_w) == -1 and int(target_h) == -1
    except ValueError:
        console.print("[bold red]Error: Invalid dimensions. Please enter numbers.[/bold red]")
        questionary.press_any_key_to_continue().ask()
        return
    if both_auto:
        console.print("[bold red]Error: Width and Height cannot both be -1.[/bold red]")
        questionary.press_any_key_to_continue().ask()
        return

    source = Path(file_path)
    output_file = f"{source.stem}_resized{source.suffix}"

    scaled = ffmpeg.input(file_path).filter('scale', w=target_w, h=target_h).output(output_file, y=None)

    if run_command(scaled, "Resizing image..."):
        console.print(f"[bold green]Successfully resized image to {output_file}[/bold green]")
    else:
        console.print("[bold red]Image resizing failed.[/bold red]")

    questionary.press_any_key_to_continue().ask()
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
def rotate_image(file_path):
    """Rotate an image by a quarter- or half-turn and save a copy."""
    rotation = questionary.select(
        "Select rotation:",
        choices=[
            "90 degrees clockwise",
            "90 degrees counter-clockwise",
            "180 degrees"
        ],
        use_indicator=True
    ).ask()
    if not rotation:
        return

    source = Path(file_path)
    output_file = f"{source.stem}_rotated{source.suffix}"

    # Each choice maps to a sequence of ffmpeg 'transpose' operations;
    # 180 degrees is expressed as two successive quarter-turns.
    transpose_sequence = {
        "90 degrees clockwise": (1,),
        "90 degrees counter-clockwise": (2,),
        "180 degrees": (2, 2),
    }

    stream = ffmpeg.input(file_path)
    for direction in transpose_sequence[rotation]:
        stream = stream.filter('transpose', direction)

    if run_command(stream.output(output_file, y=None), "Rotating image..."):
        console.print(f"[bold green]Successfully rotated image and saved to {output_file}[/bold green]")
    else:
        console.print("[bold red]Image rotation failed.[/bold red]")

    questionary.press_any_key_to_continue().ask()
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
def flip_image(file_path):
    """Mirror an image horizontally or vertically and save a copy."""
    flip_direction = questionary.select(
        "Select flip direction:",
        choices=["Horizontal", "Vertical"],
        use_indicator=True
    ).ask()
    if not flip_direction:
        return

    source = Path(file_path)
    output_file = f"{source.stem}_flipped{source.suffix}"

    # ffmpeg has a dedicated filter per axis: hflip mirrors left/right,
    # vflip mirrors top/bottom.
    mirror = 'hflip' if flip_direction == "Horizontal" else 'vflip'
    flipped = ffmpeg.input(file_path).filter(mirror).output(output_file, y=None)

    if run_command(flipped, "Flipping image..."):
        console.print(f"[bold green]Successfully flipped image and saved to {output_file}[/bold green]")
    else:
        console.print("[bold red]Image flipping failed.[/bold red]")

    questionary.press_any_key_to_continue().ask()
|
|
225
|
+
|
|
@@ -0,0 +1,178 @@
|
|
|
1
|
+
|
|
2
|
+
import os
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
import ffmpeg
|
|
6
|
+
import questionary
|
|
7
|
+
from rich.console import Console
|
|
8
|
+
|
|
9
|
+
from peg_this.utils.ffmpeg_utils import run_command, has_audio_stream
|
|
10
|
+
|
|
11
|
+
try:
|
|
12
|
+
import tkinter as tk
|
|
13
|
+
from tkinter import messagebox
|
|
14
|
+
from PIL import Image, ImageTk
|
|
15
|
+
except ImportError:
|
|
16
|
+
tk = None
|
|
17
|
+
|
|
18
|
+
console = Console()
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def crop_video(file_path):
    """Visually crop a video by selecting an area.

    Extracts a preview frame from the middle of the video, shows it in a
    tkinter window where the user drags a rectangle, then applies that
    rectangle as an ffmpeg crop filter to the whole video. The preview
    frame is always cleaned up afterwards.

    NOTE(review): the preview frame is shown at full resolution; this
    assumes the video fits on screen — selections map 1:1 to source pixels.
    """
    if not tk:
        # tkinter/Pillow failed to import at module load (see top of file).
        console.print("[bold red]Cannot perform visual cropping: tkinter & Pillow are not installed.[/bold red]")
        return

    preview_frame = f"preview_{Path(file_path).stem}.jpg"
    try:
        # Extract a frame from the middle of the video for preview
        probe = ffmpeg.probe(file_path)
        duration = float(probe['format']['duration'])
        mid_point = duration / 2

        # Corrected frame extraction command with `-q:v`
        # (q:v 2 = high-quality JPEG; y=None overwrites without prompting)
        run_command(
            ffmpeg.input(file_path, ss=mid_point).output(preview_frame, vframes=1, **{'q:v': 2}, y=None),
            "Extracting a frame for preview..."
        )

        if not os.path.exists(preview_frame):
            console.print("[bold red]Could not extract a frame from the video.[/bold red]")
            return

        # --- Tkinter GUI for Cropping ---
        root = tk.Tk()
        root.title("Crop Video - Drag to select area, close window to confirm")
        root.attributes("-topmost", True)

        img = Image.open(preview_frame)
        img_tk = ImageTk.PhotoImage(img)

        canvas = tk.Canvas(root, width=img.width, height=img.height, cursor="cross")
        canvas.pack()
        canvas.create_image(0, 0, anchor=tk.NW, image=img_tk)

        # Rubber-band selection state, mutated by the event handlers below.
        rect_coords = {"x1": 0, "y1": 0, "x2": 0, "y2": 0}
        rect_id = None

        def on_press(event):
            # Anchor the selection rectangle at the press point.
            nonlocal rect_id
            rect_coords['x1'], rect_coords['y1'] = event.x, event.y
            rect_id = canvas.create_rectangle(0, 0, 1, 1, outline='red', width=2)

        def on_drag(event):
            # Stretch the rectangle to follow the cursor.
            rect_coords['x2'], rect_coords['y2'] = event.x, event.y
            canvas.coords(rect_id, rect_coords['x1'], rect_coords['y1'], rect_coords['x2'], rect_coords['y2'])

        canvas.bind("<ButtonPress-1>", on_press)
        canvas.bind("<B1-Motion>", on_drag)

        messagebox.showinfo("Instructions", "Click and drag to draw a cropping rectangle.\nClose this window when you are done.", parent=root)
        root.mainloop()

        # --- Cropping Logic ---
        # Normalize the drag (which may go in any direction) to a
        # top-left origin plus positive width/height.
        crop_w = abs(rect_coords['x2'] - rect_coords['x1'])
        crop_h = abs(rect_coords['y2'] - rect_coords['y1'])
        crop_x = min(rect_coords['x1'], rect_coords['x2'])
        crop_y = min(rect_coords['y1'], rect_coords['y2'])

        if crop_w < 2 or crop_h < 2: # Avoid tiny, invalid crops
            console.print("[bold yellow]Cropping cancelled as no valid area was selected.[/bold yellow]")
            return

        console.print(f"Selected crop area: [bold]width={crop_w} height={crop_h} at (x={crop_x}, y={crop_y})[/bold]")

        output_file = f"{Path(file_path).stem}_cropped{Path(file_path).suffix}"

        input_stream = ffmpeg.input(file_path)
        video_stream = input_stream.video.filter('crop', w=crop_w, h=crop_h, x=crop_x, y=crop_y)

        kwargs = {'y': None} # Overwrite output
        # Check for audio and copy it if it exists
        if has_audio_stream(file_path):
            audio_stream = input_stream.audio
            kwargs['c:a'] = 'copy'
            stream = ffmpeg.output(video_stream, audio_stream, output_file, **kwargs)
        else:
            stream = ffmpeg.output(video_stream, output_file, **kwargs)

        run_command(stream, "Applying crop to video...", show_progress=True)
        console.print(f"[bold green]Successfully cropped video and saved to {output_file}[/bold green]")

    finally:
        # No except clause: any exception propagates to the caller, but the
        # preview frame is removed and the user is prompted regardless.
        if os.path.exists(preview_frame):
            os.remove(preview_frame)
        questionary.press_any_key_to_continue().ask()
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def crop_image(file_path):
    """Visually crop an image by selecting an area.

    Shows a (possibly downscaled) preview in a tkinter window, lets the
    user drag a rectangle, then applies the crop to the ORIGINAL image
    with ffmpeg, scaling the selection back to full resolution.
    """
    if not tk:
        console.print("[bold red]Cannot perform visual cropping: tkinter & Pillow are not installed.[/bold red]")
        console.print("Please install them with: [bold]pip install tk Pillow[/bold]")
        questionary.press_any_key_to_continue().ask()
        return

    try:
        # --- Tkinter GUI for Cropping ---
        root = tk.Tk()
        root.title("Crop Image - Drag to select area, close window to confirm")
        root.attributes("-topmost", True)

        img = Image.open(file_path)
        orig_width, orig_height = img.size  # full-resolution dimensions

        # Downscale the preview so it fits on screen; thumbnail() preserves
        # aspect ratio and never upscales.
        max_width = root.winfo_screenwidth() - 100
        max_height = root.winfo_screenheight() - 100
        img.thumbnail((max_width, max_height), Image.Resampling.LANCZOS)

        # BUG FIX: the selection is made on the downscaled preview but the
        # crop runs on the original file — previously the preview-space
        # coordinates were applied unscaled, cropping the wrong region for
        # any image larger than the screen. Map preview pixels back to
        # full-resolution pixels.
        scale_x = orig_width / img.width
        scale_y = orig_height / img.height

        img_tk = ImageTk.PhotoImage(img)

        canvas = tk.Canvas(root, width=img.width, height=img.height, cursor="cross")
        canvas.pack()
        canvas.create_image(0, 0, anchor=tk.NW, image=img_tk)

        # Rubber-band selection state, mutated by the event handlers below.
        rect_coords = {"x1": 0, "y1": 0, "x2": 0, "y2": 0}
        rect_id = None

        def on_press(event):
            # Anchor the selection rectangle at the press point.
            nonlocal rect_id
            rect_coords['x1'], rect_coords['y1'] = event.x, event.y
            rect_id = canvas.create_rectangle(0, 0, 1, 1, outline='red', width=2)

        def on_drag(event):
            # Stretch the rectangle to follow the cursor.
            rect_coords['x2'], rect_coords['y2'] = event.x, event.y
            canvas.coords(rect_id, rect_coords['x1'], rect_coords['y1'], rect_coords['x2'], rect_coords['y2'])

        canvas.bind("<ButtonPress-1>", on_press)
        canvas.bind("<B1-Motion>", on_drag)

        messagebox.showinfo("Instructions", "Click and drag to draw a cropping rectangle.\nClose this window when you are done.", parent=root)
        root.mainloop()

        # --- Cropping Logic (preview-space coordinates) ---
        sel_w = abs(rect_coords['x2'] - rect_coords['x1'])
        sel_h = abs(rect_coords['y2'] - rect_coords['y1'])
        sel_x = min(rect_coords['x1'], rect_coords['x2'])
        sel_y = min(rect_coords['y1'], rect_coords['y2'])

        if sel_w < 2 or sel_h < 2:  # ignore accidental clicks / no selection
            # BUG FIX: the `finally` below already prompts once; the
            # original prompted a second time here.
            console.print("[bold yellow]Cropping cancelled as no valid area was selected.[/bold yellow]")
            return

        # Scale the selection back to original-image pixels.
        crop_w = max(1, round(sel_w * scale_x))
        crop_h = max(1, round(sel_h * scale_y))
        crop_x = round(sel_x * scale_x)
        crop_y = round(sel_y * scale_y)

        console.print(f"Selected crop area: [bold]width={crop_w} height={crop_h} at (x={crop_x}, y={crop_y})[/bold]")

        output_file = f"{Path(file_path).stem}_cropped{Path(file_path).suffix}"

        stream = ffmpeg.input(file_path).filter('crop', w=crop_w, h=crop_h, x=crop_x, y=crop_y).output(output_file, y=None)

        if run_command(stream, "Applying crop to image..."):
            console.print(f"[bold green]Successfully cropped image and saved to {output_file}[/bold green]")
        else:
            console.print("[bold red]Image cropping failed.[/bold red]")

    except Exception as e:
        console.print(f"[bold red]An error occurred during cropping: {e}[/bold red]")
    finally:
        questionary.press_any_key_to_continue().ask()
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
|
|
2
|
+
import os
|
|
3
|
+
import logging
|
|
4
|
+
|
|
5
|
+
import ffmpeg
|
|
6
|
+
import questionary
|
|
7
|
+
from rich.console import Console
|
|
8
|
+
from rich.table import Table
|
|
9
|
+
|
|
10
|
+
console = Console()
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def inspect_file(file_path):
    """Show detailed information about the selected media file using ffprobe."""
    file_name = os.path.basename(file_path)
    console.print(f"Inspecting {file_name}...")
    try:
        info = ffmpeg.probe(file_path)
    except ffmpeg.Error as e:
        stderr_text = e.stderr.decode('utf-8')
        console.print("[bold red]An error occurred while inspecting the file:[/bold red]")
        console.print(stderr_text)
        logging.error(f"ffprobe error:{stderr_text}")
        questionary.press_any_key_to_continue().ask()
        return

    # Container-level summary (size, duration, format, overall bitrate).
    fmt = info.get('format', {})
    summary = Table(title=f"File Information: {file_name}", show_header=True, header_style="bold magenta")
    summary.add_column("Property", style="dim")
    summary.add_column("Value")

    megabytes = float(fmt.get('size', 0)) / (1024 * 1024)
    seconds = float(fmt.get('duration', 0))
    kbps = float(fmt.get('bit_rate', 0)) / 1000

    summary.add_row("Size", f"{megabytes:.2f} MB")
    summary.add_row("Duration", f"{seconds:.2f} seconds")
    summary.add_row("Format", fmt.get('format_long_name', 'N/A'))
    summary.add_row("Bitrate", f"{kbps:.0f} kb/s")
    console.print(summary)

    # One table per stream kind, with kind-specific columns.
    all_streams = info.get('streams', [])
    for kind, colour in (('video', 'cyan'), ('audio', 'green')):
        matching = [s for s in all_streams if s.get('codec_type') == kind]
        if not matching:
            continue

        detail = Table(title=f"{kind.capitalize()} Streams", show_header=True, header_style=f"bold {colour}")
        detail.add_column("Stream")
        detail.add_column("Codec")
        if kind == 'video':
            detail.add_column("Resolution")
            detail.add_column("Frame Rate")
        else:
            detail.add_column("Sample Rate")
            detail.add_column("Channels")

        for s in matching:
            if kind == 'video':
                detail.add_row(f"#{s.get('index')}", s.get('codec_name'), f"{s.get('width')}x{s.get('height')}", s.get('r_frame_rate'))
            else:
                detail.add_row(f"#{s.get('index')}", s.get('codec_name'), f"{s.get('sample_rate')} Hz", str(s.get('channels')))
        console.print(detail)

    questionary.press_any_key_to_continue().ask()
|