peg-this 3.0.2__py3-none-any.whl → 3.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of peg-this might be problematic.

@@ -0,0 +1,38 @@
+
+from pathlib import Path
+
+import ffmpeg
+import questionary
+from rich.console import Console
+
+from peg_this.utils.ffmpeg_utils import run_command, has_audio_stream
+
+console = Console()
+
+
+def extract_audio(file_path):
+    """Extract the audio track from a video file."""
+    if not has_audio_stream(file_path):
+        console.print("[bold red]Error: No audio stream found in the file.[/bold red]")
+        questionary.press_any_key_to_continue().ask()
+        return
+
+    audio_format = questionary.select("Select audio format:", choices=["mp3", "flac", "wav"], use_indicator=True).ask()
+    if not audio_format: return
+
+    output_file = f"{Path(file_path).stem}_audio.{audio_format}"
+    stream = ffmpeg.input(file_path).output(output_file, vn=None, acodec='libmp3lame' if audio_format == 'mp3' else audio_format, y=None)
+
+    run_command(stream, f"Extracting audio to {audio_format.upper()}...", show_progress=True)
+    console.print(f"[bold green]Successfully extracted audio to {output_file}[/bold green]")
+    questionary.press_any_key_to_continue().ask()
+
+
+def remove_audio(file_path):
+    """Create a silent version of a video."""
+    output_file = f"{Path(file_path).stem}_no_audio{Path(file_path).suffix}"
+    stream = ffmpeg.input(file_path).output(output_file, vcodec='copy', an=None, y=None)
+
+    run_command(stream, "Removing audio track...", show_progress=True)
+    console.print(f"[bold green]Successfully removed audio, saved to {output_file}[/bold green]")
+    questionary.press_any_key_to_continue().ask()
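
Note: the added modules in this diff call `run_command`, `has_audio_stream`, and `get_media_files` from `peg_this.utils.ffmpeg_utils` and `peg_this.utils.ui_utils`, which are not shown here. The following is a minimal, hypothetical sketch of helpers with matching call signatures, inferred only from how they are used in these hunks; the actual implementations shipped in the wheel may differ.

```python
# Hypothetical sketches inferred from call sites in this diff; NOT the actual
# peg_this implementations shipped in the wheel.
import os

import ffmpeg
from rich.console import Console

console = Console()


def has_audio_stream(file_path):
    """Return True if ffprobe reports at least one audio stream."""
    try:
        info = ffmpeg.probe(file_path)
        return any(s.get('codec_type') == 'audio' for s in info.get('streams', []))
    except ffmpeg.Error:
        return False


def run_command(stream, message, show_progress=False):
    """Run a built ffmpeg-python stream; return True on success.

    `show_progress` is accepted for signature compatibility but ignored here.
    """
    console.print(message)
    try:
        stream.run(capture_stdout=True, capture_stderr=True)
        return True
    except ffmpeg.Error as e:
        console.print(e.stderr.decode('utf-8', errors='replace'))
        return False


def get_media_files():
    """List media files in the current working directory."""
    exts = {'.mp4', '.mkv', '.mov', '.avi', '.webm', '.mp3', '.flac', '.wav', '.gif'}
    return sorted(f for f in os.listdir('.') if os.path.splitext(f)[1].lower() in exts)
```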
@@ -0,0 +1,125 @@
+
+import os
+import logging
+from pathlib import Path
+
+import ffmpeg
+import questionary
+from rich.console import Console
+
+from peg_this.utils.ffmpeg_utils import run_command, has_audio_stream
+from peg_this.utils.ui_utils import get_media_files
+
+console = Console()
+
+
+def batch_convert():
+    """Convert all media files in the directory to a specific format."""
+    media_files = get_media_files()
+    if not media_files:
+        console.print("[bold yellow]No media files found in the current directory.[/bold yellow]")
+        questionary.press_any_key_to_continue().ask()
+        return
+
+    output_format = questionary.select(
+        "Select output format for the batch conversion:",
+        choices=["mp4", "mkv", "mov", "avi", "webm", "mp3", "flac", "wav", "gif"],
+        use_indicator=True
+    ).ask()
+    if not output_format: return
+
+    quality_preset = None
+    if output_format in ["mp4", "mkv", "mov", "avi", "webm"]:
+        quality_preset = questionary.select(
+            "Select quality preset:",
+            choices=["Same as source", "High (CRF 18)", "Medium (CRF 23)", "Low (CRF 28)"],
+            use_indicator=True
+        ).ask()
+        if not quality_preset: return
+
+    confirm = questionary.confirm(
+        f"This will convert {len(media_files)} file(s) in the current directory to .{output_format}. Continue?",
+        default=False
+    ).ask()
+
+    if not confirm:
+        console.print("[bold yellow]Batch conversion cancelled.[/bold yellow]")
+        return
+
+    success_count = 0
+    fail_count = 0
+
+    for file in media_files:
+        console.rule(f"Processing: {file}")
+        file_path = os.path.abspath(file)
+        is_gif = Path(file_path).suffix.lower() == '.gif'
+        has_audio = has_audio_stream(file_path)
+
+        if (is_gif or not has_audio) and output_format in ["mp3", "flac", "wav"]:
+            console.print(f"[bold yellow]Skipping {file}: Source has no audio to convert.[/bold yellow]")
+            continue
+
+        output_file = f"{Path(file_path).stem}_batch.{output_format}"
+        input_stream = ffmpeg.input(file_path)
+        output_stream = None
+        kwargs = {'y': None}
+
+        try:
+            if output_format in ["mp4", "mkv", "mov", "avi", "webm"]:
+                if quality_preset == "Same as source":
+                    kwargs['c'] = 'copy'
+                else:
+                    crf = quality_preset.split(" ")[-1][1:-1]
+                    kwargs['c:v'] = 'libx264'
+                    kwargs['crf'] = crf
+                    kwargs['pix_fmt'] = 'yuv420p'
+                    if has_audio:
+                        kwargs['c:a'] = 'aac'
+                        kwargs['b:a'] = '192k'
+                    else:
+                        kwargs['an'] = None
+                output_stream = input_stream.output(output_file, **kwargs)
+
+            elif output_format in ["mp3", "flac", "wav"]:
+                kwargs['vn'] = None
+                kwargs['c:a'] = 'libmp3lame' if output_format == 'mp3' else output_format
+                if output_format == 'mp3':
+                    kwargs['b:a'] = '192k'  # Default bitrate for batch
+                output_stream = input_stream.output(output_file, **kwargs)
+
+            elif output_format == "gif":
+                fps = "15"
+                scale = "480"
+                palette_file = f"palette_{Path(file_path).stem}.png"
+
+                palette_gen_stream = input_stream.video.filter('fps', fps=fps).filter('scale', w=scale, h=-1, flags='lanczos').filter('palettegen')
+                run_command(palette_gen_stream.output(palette_file, y=None), f"Generating palette for {file}...")
+
+                if not os.path.exists(palette_file):
+                    console.print(f"[bold red]Failed to generate color palette for {file}.[/bold red]")
+                    fail_count += 1
+                    continue
+
+                palette_input = ffmpeg.input(palette_file)
+                video_stream = input_stream.video.filter('fps', fps=fps).filter('scale', w=scale, h=-1, flags='lanczos')
+                final_stream = ffmpeg.filter([video_stream, palette_input], 'paletteuse')
+                output_stream = final_stream.output(output_file, y=None)
+
+            if output_stream and run_command(output_stream, f"Converting {file}...", show_progress=True):
+                console.print(f" -> [bold green]Successfully converted to {output_file}[/bold green]")
+                success_count += 1
+            else:
+                console.print(f" -> [bold red]Failed to convert {file}.[/bold red]")
+                fail_count += 1
+
+            if output_format == "gif" and os.path.exists(f"palette_{Path(file_path).stem}.png"):
+                os.remove(f"palette_{Path(file_path).stem}.png")
+
+        except Exception as e:
+            console.print(f"[bold red]An unexpected error occurred while processing {file}: {e}[/bold red]")
+            logging.error(f"Batch convert error for file {file}: {e}")
+            fail_count += 1
+
+    console.rule("[bold green]Batch Conversion Complete[/bold green]")
+    console.print(f"Successful: {success_count} | Failed: {fail_count}")
+    questionary.press_any_key_to_continue().ask()
@@ -0,0 +1,93 @@
+
+import os
+from pathlib import Path
+
+import ffmpeg
+import questionary
+from rich.console import Console
+
+from peg_this.utils.ffmpeg_utils import run_command, has_audio_stream
+
+console = Console()
+
+
+def convert_file(file_path):
+    """Convert the file to a different format."""
+    is_gif = Path(file_path).suffix.lower() == '.gif'
+    has_audio = has_audio_stream(file_path)
+
+    output_format = questionary.select("Select the output format:", choices=["mp4", "mkv", "mov", "avi", "webm", "mp3", "flac", "wav", "gif"], use_indicator=True).ask()
+    if not output_format: return
+
+    if (is_gif or not has_audio) and output_format in ["mp3", "flac", "wav"]:
+        console.print("[bold red]Error: Source has no audio to convert.[/bold red]")
+        questionary.press_any_key_to_continue().ask()
+        return
+
+    output_file = f"{Path(file_path).stem}_converted.{output_format}"
+
+    input_stream = ffmpeg.input(file_path)
+    output_stream = None
+    kwargs = {'y': None}
+
+    if output_format in ["mp4", "mkv", "mov", "avi", "webm"]:
+        quality = questionary.select("Select quality preset:", choices=["Same as source", "High (CRF 18)", "Medium (CRF 23)", "Low (CRF 28)"], use_indicator=True).ask()
+        if not quality: return
+
+        if quality == "Same as source":
+            kwargs['c'] = 'copy'
+        else:
+            crf = quality.split(" ")[-1][1:-1]
+            kwargs['c:v'] = 'libx264'
+            kwargs['crf'] = crf
+            kwargs['pix_fmt'] = 'yuv420p'
+            if has_audio:
+                kwargs['c:a'] = 'aac'
+                kwargs['b:a'] = '192k'
+            else:
+                kwargs['an'] = None
+        output_stream = input_stream.output(output_file, **kwargs)
+
+    elif output_format in ["mp3", "flac", "wav"]:
+        kwargs['vn'] = None
+        if output_format == 'mp3':
+            bitrate = questionary.select("Select audio bitrate:", choices=["128k", "192k", "256k", "320k"]).ask()
+            if not bitrate: return
+            kwargs['c:a'] = 'libmp3lame'
+            kwargs['b:a'] = bitrate
+        else:
+            kwargs['c:a'] = output_format
+        output_stream = input_stream.output(output_file, **kwargs)
+
+    elif output_format == "gif":
+        fps = questionary.text("Enter frame rate (e.g., 15):", default="15").ask()
+        if not fps: return
+        scale = questionary.text("Enter width in pixels (e.g., 480):", default="480").ask()
+        if not scale: return
+
+        palette_file = f"palette_{Path(file_path).stem}.png"
+
+        # Correctly chain filters for palette generation using explicit w/h arguments
+        palette_gen_stream = input_stream.video.filter('fps', fps=fps).filter('scale', w=scale, h=-1, flags='lanczos').filter('palettegen')
+        run_command(palette_gen_stream.output(palette_file, y=None), "Generating color palette...")
+
+        if not os.path.exists(palette_file):
+            console.print("[bold red]Failed to generate color palette for GIF.[/bold red]")
+            questionary.press_any_key_to_continue().ask()
+            return
+
+        palette_input = ffmpeg.input(palette_file)
+        video_stream = input_stream.video.filter('fps', fps=fps).filter('scale', w=scale, h=-1, flags='lanczos')
+
+        final_stream = ffmpeg.filter([video_stream, palette_input], 'paletteuse')
+        output_stream = final_stream.output(output_file, y=None)
+
+    if output_stream and run_command(output_stream, f"Converting to {output_format}...", show_progress=True):
+        console.print(f"[bold green]Successfully converted to {output_file}[/bold green]")
+    else:
+        console.print("[bold red]Conversion failed.[/bold red]")
+
+    if output_format == "gif" and os.path.exists(f"palette_{Path(file_path).stem}.png"):
+        os.remove(f"palette_{Path(file_path).stem}.png")
+
+    questionary.press_any_key_to_continue().ask()
@@ -0,0 +1,106 @@
+
+import os
+from pathlib import Path
+
+import ffmpeg
+import questionary
+from rich.console import Console
+
+from peg_this.utils.ffmpeg_utils import run_command, has_audio_stream
+
+try:
+    import tkinter as tk
+    from tkinter import messagebox
+    from PIL import Image, ImageTk
+except ImportError:
+    tk = None
+
+console = Console()
+
+
+def crop_video(file_path):
+    """Visually crop a video by selecting an area."""
+    if not tk:
+        console.print("[bold red]Cannot perform visual cropping: tkinter & Pillow are not installed.[/bold red]")
+        return
+
+    preview_frame = f"preview_{Path(file_path).stem}.jpg"
+    try:
+        # Extract a frame from the middle of the video for preview
+        probe = ffmpeg.probe(file_path)
+        duration = float(probe['format']['duration'])
+        mid_point = duration / 2
+
+        # Corrected frame extraction command with `-q:v`
+        run_command(
+            ffmpeg.input(file_path, ss=mid_point).output(preview_frame, vframes=1, **{'q:v': 2}, y=None),
+            "Extracting a frame for preview..."
+        )
+
+        if not os.path.exists(preview_frame):
+            console.print("[bold red]Could not extract a frame from the video.[/bold red]")
+            return
+
+        # --- Tkinter GUI for Cropping ---
+        root = tk.Tk()
+        root.title("Crop Video - Drag to select area, close window to confirm")
+        root.attributes("-topmost", True)
+
+        img = Image.open(preview_frame)
+        img_tk = ImageTk.PhotoImage(img)
+
+        canvas = tk.Canvas(root, width=img.width, height=img.height, cursor="cross")
+        canvas.pack()
+        canvas.create_image(0, 0, anchor=tk.NW, image=img_tk)
+
+        rect_coords = {"x1": 0, "y1": 0, "x2": 0, "y2": 0}
+        rect_id = None
+
+        def on_press(event):
+            nonlocal rect_id
+            rect_coords['x1'], rect_coords['y1'] = event.x, event.y
+            rect_id = canvas.create_rectangle(0, 0, 1, 1, outline='red', width=2)
+
+        def on_drag(event):
+            rect_coords['x2'], rect_coords['y2'] = event.x, event.y
+            canvas.coords(rect_id, rect_coords['x1'], rect_coords['y1'], rect_coords['x2'], rect_coords['y2'])
+
+        canvas.bind("<ButtonPress-1>", on_press)
+        canvas.bind("<B1-Motion>", on_drag)
+
+        messagebox.showinfo("Instructions", "Click and drag to draw a cropping rectangle.\nClose this window when you are done.", parent=root)
+        root.mainloop()
+
+        # --- Cropping Logic ---
+        crop_w = abs(rect_coords['x2'] - rect_coords['x1'])
+        crop_h = abs(rect_coords['y2'] - rect_coords['y1'])
+        crop_x = min(rect_coords['x1'], rect_coords['x2'])
+        crop_y = min(rect_coords['y1'], rect_coords['y2'])
+
+        if crop_w < 2 or crop_h < 2:  # Avoid tiny, invalid crops
+            console.print("[bold yellow]Cropping cancelled as no valid area was selected.[/bold yellow]")
+            return
+
+        console.print(f"Selected crop area: [bold]width={crop_w} height={crop_h} at (x={crop_x}, y={crop_y})[/bold]")
+
+        output_file = f"{Path(file_path).stem}_cropped{Path(file_path).suffix}"
+
+        input_stream = ffmpeg.input(file_path)
+        video_stream = input_stream.video.filter('crop', w=crop_w, h=crop_h, x=crop_x, y=crop_y)
+
+        kwargs = {'y': None}  # Overwrite output
+        # Check for audio and copy it if it exists
+        if has_audio_stream(file_path):
+            audio_stream = input_stream.audio
+            kwargs['c:a'] = 'copy'
+            stream = ffmpeg.output(video_stream, audio_stream, output_file, **kwargs)
+        else:
+            stream = ffmpeg.output(video_stream, output_file, **kwargs)
+
+        run_command(stream, "Applying crop to video...", show_progress=True)
+        console.print(f"[bold green]Successfully cropped video and saved to {output_file}[/bold green]")
+
+    finally:
+        if os.path.exists(preview_frame):
+            os.remove(preview_frame)
+        questionary.press_any_key_to_continue().ask()
@@ -0,0 +1,60 @@
+
+import os
+import logging
+
+import ffmpeg
+import questionary
+from rich.console import Console
+from rich.table import Table
+
+console = Console()
+
+
+def inspect_file(file_path):
+    """Show detailed information about the selected media file using ffprobe."""
+    console.print(f"Inspecting {os.path.basename(file_path)}...")
+    try:
+        info = ffmpeg.probe(file_path)
+    except ffmpeg.Error as e:
+        console.print("[bold red]An error occurred while inspecting the file:[/bold red]")
+        console.print(e.stderr.decode('utf-8'))
+        logging.error(f"ffprobe error:{e.stderr.decode('utf-8')}")
+        questionary.press_any_key_to_continue().ask()
+        return
+
+    format_info = info.get('format', {})
+    table = Table(title=f"File Information: {os.path.basename(file_path)}", show_header=True, header_style="bold magenta")
+    table.add_column("Property", style="dim")
+    table.add_column("Value")
+
+    size_mb = float(format_info.get('size', 0)) / (1024 * 1024)
+    duration_sec = float(format_info.get('duration', 0))
+    bit_rate_kbps = float(format_info.get('bit_rate', 0)) / 1000
+
+    table.add_row("Size", f"{size_mb:.2f} MB")
+    table.add_row("Duration", f"{duration_sec:.2f} seconds")
+    table.add_row("Format", format_info.get('format_long_name', 'N/A'))
+    table.add_row("Bitrate", f"{bit_rate_kbps:.0f} kb/s")
+    console.print(table)
+
+    for stream_type in ['video', 'audio']:
+        streams = [s for s in info.get('streams', []) if s.get('codec_type') == stream_type]
+        if streams:
+            stream_table = Table(title=f"{stream_type.capitalize()} Streams", show_header=True, header_style=f"bold {'cyan' if stream_type == 'video' else 'green'}")
+            stream_table.add_column("Stream")
+            stream_table.add_column("Codec")
+            if stream_type == 'video':
+                stream_table.add_column("Resolution")
+                stream_table.add_column("Frame Rate")
+            else:
+                stream_table.add_column("Sample Rate")
+                stream_table.add_column("Channels")
+
+            for s in streams:
+                if stream_type == 'video':
+                    stream_table.add_row(f"#{s.get('index')}", s.get('codec_name'), f"{s.get('width')}x{s.get('height')}", s.get('r_frame_rate'))
+                else:
+                    stream_table.add_row(f"#{s.get('index')}", s.get('codec_name'), f"{s.get('sample_rate')} Hz", str(s.get('channels')))
+            console.print(stream_table)
+
+    questionary.press_any_key_to_continue().ask()
@@ -0,0 +1,83 @@
+
+import os
+from pathlib import Path
+
+import ffmpeg
+import questionary
+from rich.console import Console
+
+from peg_this.utils.ffmpeg_utils import run_command
+from peg_this.utils.ui_utils import get_media_files
+
+console = Console()
+
+
+def join_videos():
+    """Join multiple videos into a single file after standardizing their resolutions and sample rates."""
+    console.print("[bold cyan]Select videos to join (in order). Press Enter when done.[/bold cyan]")
+
+    media_files = get_media_files()
+    video_files = [f for f in media_files if Path(f).suffix.lower() in [".mp4", ".mkv", ".mov", ".avi", ".webm"]]
+
+    if len(video_files) < 2:
+        console.print("[bold yellow]Not enough video files in the directory to join.[/bold yellow]")
+        questionary.press_any_key_to_continue().ask()
+        return
+
+    selected_videos = questionary.checkbox("Select at least two videos to join in order:", choices=video_files).ask()
+
+    if not selected_videos or len(selected_videos) < 2:
+        console.print("[bold yellow]Joining cancelled. At least two videos must be selected.[/bold yellow]")
+        return
+
+    console.print("Videos will be joined in this order:")
+    for i, video in enumerate(selected_videos):
+        console.print(f" {i+1}. {video}")
+
+    output_file = questionary.text("Enter the output file name:", default="joined_video.mp4").ask()
+    if not output_file: return
+
+    try:
+        first_video_path = os.path.abspath(selected_videos[0])
+        probe = ffmpeg.probe(first_video_path)
+        video_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')
+        audio_info = next(s for s in probe['streams'] if s['codec_type'] == 'audio')
+
+        target_width = video_info['width']
+        target_height = video_info['height']
+        target_sar = video_info.get('sample_aspect_ratio', '1:1')
+        target_sample_rate = audio_info['sample_rate']
+
+    except Exception as e:
+        console.print(f"[bold red]Could not probe first video for target parameters: {e}[/bold red]")
+        return
+
+    console.print(f"Standardizing all videos to: {target_width}x{target_height} resolution and {target_sample_rate} Hz audio.")
+
+    processed_streams = []
+    for video_file in selected_videos:
+        stream = ffmpeg.input(os.path.abspath(video_file))
+        v = (
+            stream.video
+            .filter('scale', w=target_width, h=target_height, force_original_aspect_ratio='decrease')
+            .filter('pad', w=target_width, h=target_height, x='(ow-iw)/2', y='(oh-ih)/2')
+            .filter('setsar', sar=target_sar.replace(':','/'))
+            .filter('setpts', 'PTS-STARTPTS')
+        )
+        a = (
+            stream.audio
+            .filter('aresample', sample_rate=target_sample_rate)
+            .filter('asetpts', 'PTS-STARTPTS')
+        )
+        processed_streams.append(v)
+        processed_streams.append(a)
+
+    joined = ffmpeg.concat(*processed_streams, v=1, a=1).node
+    output_stream = ffmpeg.output(joined[0], joined[1], output_file, **{'c:v': 'libx264', 'crf': 23, 'c:a': 'aac', 'b:a': '192k', 'y': None})
+
+    if run_command(output_stream, "Joining and re-encoding videos...", show_progress=True):
+        console.print(f"[bold green]Successfully joined videos into {output_file}[/bold green]")
+    else:
+        console.print("[bold red]Failed to join videos.[/bold red]")
+
+    questionary.press_any_key_to_continue().ask()
@@ -0,0 +1,26 @@
+
+from pathlib import Path
+
+import ffmpeg
+import questionary
+from rich.console import Console
+
+from peg_this.utils.ffmpeg_utils import run_command
+
+console = Console()
+
+
+def trim_video(file_path):
+    """Cut a video by specifying start and end times."""
+    start_time = questionary.text("Enter start time (HH:MM:SS or seconds):").ask()
+    if not start_time: return
+    end_time = questionary.text("Enter end time (HH:MM:SS or seconds):").ask()
+    if not end_time: return
+
+    output_file = f"{Path(file_path).stem}_trimmed{Path(file_path).suffix}"
+
+    stream = ffmpeg.input(file_path, ss=start_time, to=end_time).output(output_file, c='copy', y=None)
+
+    run_command(stream, "Trimming video...", show_progress=True)
+    console.print(f"[bold green]Successfully trimmed to {output_file}[/bold green]")
+    questionary.press_any_key_to_continue().ask()