peg-this 3.0.2__py3-none-any.whl → 4.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- peg_this/features/__init__.py +0 -0
- peg_this/features/audio.py +85 -0
- peg_this/features/batch.py +151 -0
- peg_this/features/convert.py +309 -0
- peg_this/features/crop.py +207 -0
- peg_this/features/inspect.py +110 -0
- peg_this/features/join.py +137 -0
- peg_this/features/subtitle.py +397 -0
- peg_this/features/trim.py +56 -0
- peg_this/peg_this.py +53 -622
- peg_this/utils/__init__.py +0 -0
- peg_this/utils/ffmpeg_utils.py +129 -0
- peg_this/utils/ui_utils.py +52 -0
- peg_this/utils/validation.py +228 -0
- peg_this-4.1.0.dist-info/METADATA +283 -0
- peg_this-4.1.0.dist-info/RECORD +21 -0
- {peg_this-3.0.2.dist-info → peg_this-4.1.0.dist-info}/WHEEL +1 -1
- peg_this-3.0.2.dist-info/METADATA +0 -87
- peg_this-3.0.2.dist-info/RECORD +0 -8
- {peg_this-3.0.2.dist-info → peg_this-4.1.0.dist-info}/entry_points.txt +0 -0
- {peg_this-3.0.2.dist-info → peg_this-4.1.0.dist-info}/licenses/LICENSE +0 -0
- {peg_this-3.0.2.dist-info → peg_this-4.1.0.dist-info}/top_level.txt +0 -0
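Version 4.1.0 splits the former monolithic peg_this/peg_this.py into feature and utility modules (listed above). For orientation, a minimal usage sketch based only on the two module bodies shown below; the other feature modules are listed but their contents are not part of this excerpt:

# Hypothetical calls; assumes these functions are the interactive entry points,
# as defined in the module bodies shown below.
from peg_this.features.subtitle import generate_subtitles
from peg_this.features.trim import trim_video

generate_subtitles("input.mp4")   # Whisper-based subtitle generation workflow
trim_video("input.mp4")           # stream-copy trim workflow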
peg_this/features/subtitle.py (new file)
@@ -0,0 +1,397 @@
+import os
+import tempfile
+from pathlib import Path
+
+import ffmpeg
+import questionary
+from rich.console import Console
+from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TimeElapsedColumn
+
+from peg_this.utils.ffmpeg_utils import run_command, has_audio_stream
+from peg_this.utils.validation import (
+    validate_input_file, check_output_file, check_disk_space,
+    get_video_duration, format_duration, press_continue
+)
+
+console = Console()
+
+
+def check_existing_subtitles(file_path):
+    try:
+        probe = ffmpeg.probe(file_path)
+        subtitle_streams = [s for s in probe.get('streams', []) if s.get('codec_type') == 'subtitle']
+        return len(subtitle_streams) > 0, len(subtitle_streams)
+    except Exception:
+        return False, 0
+
+
+def sanitize_path_for_filter(path):
+    path_str = str(path)
+    path_str = path_str.replace("\\", "/")
+    path_str = path_str.replace(":", "\\:")
+    path_str = path_str.replace("'", "\\'")
+    return path_str
+
+
+def extract_audio_for_whisper(input_file, temp_dir):
+    temp_wav = os.path.join(temp_dir, "temp_audio.wav")
+    try:
+        console.print("[cyan]Extracting audio for analysis...[/cyan]")
+        (
+            ffmpeg
+            .input(input_file)
+            .output(temp_wav, ac=1, ar=16000, vn=None, loglevel="error")
+            .overwrite_output()
+            .run(capture_stdout=True, capture_stderr=True)
+        )
+        if not os.path.exists(temp_wav):
+            console.print("[bold red]Error: Failed to extract audio file.[/bold red]")
+            return None
+        if os.path.getsize(temp_wav) == 0:
+            console.print("[bold red]Error: Extracted audio is empty.[/bold red]")
+            return None
+        return temp_wav
+    except ffmpeg.Error as e:
+        error_msg = e.stderr.decode() if e.stderr else "Unknown error"
+        console.print(f"[bold red]Failed to extract audio: {error_msg}[/bold red]")
+        return None
+
+
+def format_timestamp(seconds):
+    hours = int(seconds // 3600)
+    minutes = int((seconds % 3600) // 60)
+    secs = int(seconds % 60)
+    millis = int((seconds - int(seconds)) * 1000)
+    return f"{hours:02d}:{minutes:02d}:{secs:02d},{millis:03d}"
+
+
+def format_timestamp_vtt(seconds):
+    hours = int(seconds // 3600)
+    minutes = int((seconds % 3600) // 60)
+    secs = int(seconds % 60)
+    millis = int((seconds - int(seconds)) * 1000)
+    return f"{hours:02d}:{minutes:02d}:{secs:02d}.{millis:03d}"
+
+
+def segments_to_srt(segments):
+    srt_content = []
+    for i, segment in enumerate(segments, 1):
+        start = format_timestamp(segment.start)
+        end = format_timestamp(segment.end)
+        text = segment.text.strip()
+        srt_content.append(f"{i}\n{start} --> {end}\n{text}\n")
+    return "\n".join(srt_content)
+
+
+def segments_to_vtt(segments):
+    vtt_content = ["WEBVTT\n"]
+    for segment in segments:
+        start = format_timestamp_vtt(segment.start)
+        end = format_timestamp_vtt(segment.end)
+        text = segment.text.strip()
+        vtt_content.append(f"{start} --> {end}\n{text}\n")
+    return "\n".join(vtt_content)
+
+
+def segments_to_txt(segments):
+    return "\n".join(segment.text.strip() for segment in segments)
+
+
+def segments_to_lrc(segments):
+    lrc_content = []
+    for segment in segments:
+        minutes = int(segment.start // 60)
+        seconds = segment.start % 60
+        text = segment.text.strip()
+        lrc_content.append(f"[{minutes:02d}:{seconds:05.2f}]{text}")
+    return "\n".join(lrc_content)
+
+
+def generate_subtitles(file_path):
+    if not validate_input_file(file_path):
+        press_continue()
+        return
+
+    if not has_audio_stream(file_path):
+        console.print("[bold red]Error: File has no audio stream.[/bold red]")
+        console.print("[dim]Subtitles require audio to transcribe.[/dim]")
+        press_continue()
+        return
+
+    has_subs, sub_count = check_existing_subtitles(file_path)
+    if has_subs:
+        console.print(f"[yellow]Note: This video already has {sub_count} subtitle track(s) embedded.[/yellow]")
+        if not questionary.confirm("Continue generating new subtitles?", default=True).ask():
+            return
+
+    duration = get_video_duration(file_path)
+    if duration > 3600:
+        console.print(f"[yellow]Note: This is a long video ({format_duration(duration)}).[/yellow]")
+        console.print("[dim]Transcription may take a while. Consider using a smaller model for faster results.[/dim]")
+        if not questionary.confirm("Continue?", default=True).ask():
+            return
+
+    try:
+        from faster_whisper import WhisperModel
+    except ImportError:
+        console.print("[bold red]Error: faster-whisper is not installed.[/bold red]")
+        console.print("[yellow]Install it with: pip install faster-whisper[/yellow]")
+        press_continue()
+        return
+
+    console.print("\n[bold cyan]Subtitle Generation (Whisper AI)[/bold cyan]")
+    if duration > 0:
+        console.print(f"[dim]Video duration: {format_duration(duration)}[/dim]")
+
+    if duration > 1800:
+        default_model = "tiny.en (fastest, English only, ~75MB)"
+        console.print("[dim]Tip: For long videos, smaller models are recommended.[/dim]")
+    else:
+        default_model = "small.en (balanced, English only, ~500MB)"
+
+    model_choice = questionary.select(
+        "Select Whisper model:",
+        choices=[
+            "tiny.en (fastest, English only, ~75MB)",
+            "base.en (fast, English only, ~150MB)",
+            "small.en (balanced, English only, ~500MB)",
+            "medium.en (accurate, English only, ~1.5GB)",
+            "small (balanced, multilingual, ~500MB)",
+            "medium (accurate, multilingual, ~1.5GB)",
+            "large-v3 (best quality, multilingual, ~3GB)",
+        ],
+        default=default_model
+    ).ask()
+    if not model_choice:
+        return
+
+    model_name = model_choice.split(" ")[0]
+
+    language = "en"
+    if not model_name.endswith(".en"):
+        if questionary.confirm("Change language? (default: English)", default=False).ask():
+            console.print("\n[dim]Common codes: en (English), ta (Tamil), hi (Hindi), te (Telugu),")
+            console.print("ml (Malayalam), kn (Kannada), fr (French), de (German), es (Spanish), zh (Chinese)[/dim]")
+            console.print("[dim]Full list: https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes[/dim]")
+            language = questionary.text(
+                "Enter language code (or 'auto' to detect automatically):",
+                default="en"
+            ).ask()
+            if not language:
+                return
+            if language == "auto":
+                language = None
+
+    processing_mode = questionary.select(
+        "Select processing mode:",
+        choices=[
+            "Fast (Recommended) - Optimized for speed, great accuracy",
+            "Accurate - Best quality, slower processing",
+        ],
+        default="Fast (Recommended) - Optimized for speed, great accuracy"
+    ).ask()
+    if not processing_mode:
+        return
+
+    compute_type = "int8" if "Fast" in processing_mode else "float32"
+
+    action = questionary.select(
+        "What do you want to do with the subtitles?",
+        choices=[
+            "Export as sidecar file (.srt/.vtt)",
+            "Embed into video (Soft Subtitles)",
+            "Burn into video (Hard Subtitles)"
+        ]
+    ).ask()
+    if not action:
+        return
+
+    output_format = "srt"
+    if "sidecar" in action:
+        output_format = questionary.select(
+            "Select format:",
+            choices=["srt", "vtt", "txt", "lrc"]
+        ).ask()
+        if not output_format:
+            return
+
+    input_p = Path(file_path)
+
+    if "sidecar" in action:
+        output_path = input_p.with_name(f"{input_p.stem}.{output_format}")
+        action_result, final_output_path = check_output_file(str(output_path), "Subtitle file")
+    elif "Embed" in action:
+        output_path = input_p.with_name(f"{input_p.stem}_softsub{input_p.suffix}")
+        action_result, final_output_path = check_output_file(str(output_path), "Video file")
+    elif "Burn" in action:
+        output_path = input_p.with_name(f"{input_p.stem}_hardsub{input_p.suffix}")
+        action_result, final_output_path = check_output_file(str(output_path), "Video file")
+        if action_result != 'cancel' and not check_disk_space(file_path, multiplier=2):
+            return
+    else:
+        action_result = 'proceed'
+        final_output_path = None
+
+    if action_result == 'cancel':
+        console.print("[yellow]Operation cancelled.[/yellow]")
+        press_continue()
+        return
+
+    crf = "23"
+    if "Burn" in action:
+        quality = questionary.select(
+            "Select Video Quality (CRF):",
+            choices=["High (18)", "Medium (23)", "Low (28)"],
+            default="Medium (23)"
+        ).ask()
+        if not quality:
+            return
+        crf = quality.split("(")[1].strip(")")
+
+    with tempfile.TemporaryDirectory() as temp_dir:
+        wav_path = extract_audio_for_whisper(file_path, temp_dir)
+        if not wav_path:
+            press_continue()
+            return
+
+        console.print(f"[cyan]Loading Whisper model '{model_name}'...[/cyan]")
+        console.print("[dim]First run will download the model (may take a few minutes)[/dim]")
+
+        try:
+            model = WhisperModel(model_name, device="cpu", compute_type=compute_type)
+        except Exception as e:
+            error_msg = str(e)
+            if "out of memory" in error_msg.lower():
+                console.print("[bold red]Error: Not enough memory to load model.[/bold red]")
+                console.print("[yellow]Try using a smaller model (tiny or base).[/yellow]")
+            elif "network" in error_msg.lower() or "connection" in error_msg.lower():
+                console.print("[bold red]Error: Failed to download model. Check your internet connection.[/bold red]")
+            else:
+                console.print(f"[bold red]Failed to load model: {e}[/bold red]")
+            press_continue()
+            return
+
+        console.print("[cyan]Transcribing audio...[/cyan]")
+
+        try:
+            with Progress(
+                SpinnerColumn(),
+                TextColumn("[progress.description]{task.description}"),
+                BarColumn(),
+                TimeElapsedColumn(),
+                console=console
+            ) as progress:
+                task = progress.add_task("Transcribing...", total=None)
+                segments_generator, info = model.transcribe(
+                    wav_path,
+                    language=language,
+                    beam_size=5,
+                    vad_filter=True,
+                    vad_parameters=dict(min_silence_duration_ms=500)
+                )
+                segments = list(segments_generator)
+                progress.update(task, completed=100)
+
+        except KeyboardInterrupt:
+            console.print("\n[yellow]Transcription cancelled by user.[/yellow]")
+            press_continue()
+            return
+        except Exception as e:
+            error_msg = str(e)
+            if "out of memory" in error_msg.lower():
+                console.print("[bold red]Error: Ran out of memory during transcription.[/bold red]")
+                console.print("[yellow]Try using a smaller model or processing a shorter video.[/yellow]")
+            else:
+                console.print(f"[bold red]Transcription failed: {e}[/bold red]")
+            press_continue()
+            return
+
+        if not segments:
+            console.print("[bold yellow]No speech detected in audio.[/bold yellow]")
+            console.print("[dim]The video might be silent, have only music, or the audio quality is too low.[/dim]")
+            press_continue()
+            return
+
+        detected_lang = info.language if language is None else language
+        console.print(f"[green]Detected language: {detected_lang}[/green]")
+        console.print(f"[green]Transcribed {len(segments)} segments[/green]")
+
+        if output_format == "srt" or "Embed" in action or "Burn" in action:
+            subtitle_content = segments_to_srt(segments)
+            sub_ext = "srt"
+        elif output_format == "vtt":
+            subtitle_content = segments_to_vtt(segments)
+            sub_ext = "vtt"
+        elif output_format == "txt":
+            subtitle_content = segments_to_txt(segments)
+            sub_ext = "txt"
+        elif output_format == "lrc":
+            subtitle_content = segments_to_lrc(segments)
+            sub_ext = "lrc"
+        else:
+            subtitle_content = segments_to_srt(segments)
+            sub_ext = "srt"
+
+        if not subtitle_content.strip():
+            console.print("[bold yellow]Warning: Generated subtitles are empty.[/bold yellow]")
+            press_continue()
+            return
+
+        sub_temp_path = os.path.join(temp_dir, f"output.{sub_ext}")
+        try:
+            with open(sub_temp_path, "w", encoding="utf-8") as f:
+                f.write(subtitle_content)
+        except IOError as e:
+            console.print(f"[bold red]Error writing subtitle file: {e}[/bold red]")
+            press_continue()
+            return
+
+        try:
+            if "sidecar" in action:
+                with open(final_output_path, "w", encoding="utf-8") as f:
+                    f.write(subtitle_content)
+                console.print(f"[bold green]Saved subtitles to: {final_output_path}[/bold green]")
+
+            elif "Embed" in action:
+                console.print("[cyan]Embedding subtitles (Soft Subs)...[/cyan]")
+                ext = input_p.suffix.lower()
+                scodec = "mov_text" if ext in ['.mp4', '.m4v', '.mov'] else "srt"
+                stream = ffmpeg.input(file_path)
+                sub_stream = ffmpeg.input(sub_temp_path)
+                out = ffmpeg.output(
+                    stream, sub_stream, str(final_output_path),
+                    c='copy', **{'c:s': scodec}, **{'metadata:s:s:0': f'language={detected_lang}'}
+                )
+                if action_result == 'overwrite':
+                    out = out.overwrite_output()
+                if run_command(out, "Embedding subtitles...", show_progress=True):
+                    console.print(f"[bold green]Created: {final_output_path}[/bold green]")
+                else:
+                    console.print("[bold red]Failed to embed subtitles.[/bold red]")
+
+            elif "Burn" in action:
+                console.print("[cyan]Burning subtitles (Hard Subs)...[/cyan]")
+                console.print("[dim]This requires re-encoding and may take a while...[/dim]")
+                stream = ffmpeg.input(file_path)
+                video = stream.video.filter('subtitles', sub_temp_path)
+                audio = stream.audio
+                out = ffmpeg.output(
+                    video, audio, str(final_output_path),
+                    vcodec='libx264', acodec='copy', crf=crf, preset='fast'
+                )
+                if action_result == 'overwrite':
+                    out = out.overwrite_output()
+                if run_command(out, "Burning subtitles (Re-encoding)...", show_progress=True):
+                    console.print(f"[bold green]Created: {final_output_path}[/bold green]")
+                else:
+                    console.print("[bold red]Failed to burn subtitles.[/bold red]")
+
+        except PermissionError:
+            console.print("[bold red]Error: Permission denied. Cannot write to output location.[/bold red]")
+            console.print("[dim]Try saving to a different location or check folder permissions.[/dim]")
+        except IOError as e:
+            console.print(f"[bold red]Error writing output: {e}[/bold red]")
+        except Exception as e:
+            console.print(f"[bold red]Unexpected error: {e}[/bold red]")
+
+    press_continue()
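The timestamp and segment formatters above are pure functions, so they can be exercised without ffmpeg or faster-whisper installed. A minimal sketch, assuming the module is importable as packaged and using a namedtuple as a stand-in for faster-whisper's segment objects (only .start, .end, and .text are read):

# Hypothetical stand-in for faster_whisper segments; sample values are illustrative only.
from collections import namedtuple
from peg_this.features.subtitle import (
    format_timestamp, segments_to_srt, segments_to_vtt, segments_to_lrc
)

Segment = namedtuple("Segment", ["start", "end", "text"])
segments = [Segment(0.0, 2.5, " Hello there."), Segment(2.5, 5.0, " This is a test.")]

print(format_timestamp(2.5))       # 00:00:02,500 (SRT comma separator; the VTT variant uses a dot)
print(segments_to_srt(segments))   # numbered cues with "start --> end" lines
print(segments_to_vtt(segments))   # WEBVTT header followed by cues
print(segments_to_lrc(segments))   # [MM:SS.xx] lyric-style lines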
peg_this/features/trim.py (new file)
@@ -0,0 +1,56 @@
+from pathlib import Path
+
+import ffmpeg
+import questionary
+from rich.console import Console
+
+from peg_this.utils.ffmpeg_utils import run_command
+from peg_this.utils.validation import (
+    validate_input_file, check_output_file, get_video_duration,
+    validate_time_range, format_duration, press_continue
+)
+
+console = Console()
+
+
+def trim_video(file_path):
+    if not validate_input_file(file_path):
+        press_continue()
+        return
+
+    duration = get_video_duration(file_path)
+    if duration > 0:
+        console.print(f"[dim]Video duration: {format_duration(duration)}[/dim]")
+
+    start_time = questionary.text("Enter start time (HH:MM:SS or seconds):").ask()
+    if not start_time:
+        return
+
+    end_time = questionary.text("Enter end time (HH:MM:SS or seconds):").ask()
+    if not end_time:
+        return
+
+    start_secs, end_secs = validate_time_range(start_time, end_time, duration if duration > 0 else None)
+    if start_secs is None:
+        press_continue()
+        return
+
+    output_file = f"{Path(file_path).stem}_trimmed{Path(file_path).suffix}"
+    action_result, final_output = check_output_file(output_file, "Video file")
+
+    if action_result == 'cancel':
+        console.print("[yellow]Operation cancelled.[/yellow]")
+        press_continue()
+        return
+
+    stream = ffmpeg.input(file_path, ss=start_secs, to=end_secs).output(final_output, c='copy')
+
+    if action_result == 'overwrite':
+        stream = stream.overwrite_output()
+
+    if run_command(stream, "Trimming video...", show_progress=True):
+        console.print(f"[bold green]Successfully trimmed to {final_output}[/bold green]")
+    else:
+        console.print("[bold red]Failed to trim video.[/bold red]")
+
+    press_continue()
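trim_video builds a stream-copy trim: ss/to are applied as input options and c='copy' on the output, so no re-encoding takes place. A minimal sketch of the ffmpeg arguments that chain resolves to, assuming ffmpeg-python's get_args() helper and hypothetical file names and times:

# Mirrors the chain built in trim_video; paths and times here are illustrative only.
import ffmpeg

stream = ffmpeg.input("input.mp4", ss=5, to=20).output("input_trimmed.mp4", c="copy")
print(stream.get_args())
# Roughly: ['-ss', '5', '-to', '20', '-i', 'input.mp4', '-c', 'copy', 'input_trimmed.mp4']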