talks-reducer 0.4.1__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- talks_reducer/__about__.py +1 -1
- talks_reducer/cli.py +25 -4
- talks_reducer/ffmpeg.py +34 -6
- talks_reducer/gui.py +162 -27
- talks_reducer/models.py +29 -4
- talks_reducer/pipeline.py +83 -9
- talks_reducer/server.py +354 -0
- talks_reducer/service_client.py +102 -0
- talks_reducer-0.5.1.dist-info/METADATA +119 -0
- talks_reducer-0.5.1.dist-info/RECORD +19 -0
- {talks_reducer-0.4.1.dist-info → talks_reducer-0.5.1.dist-info}/entry_points.txt +1 -0
- talks_reducer-0.4.1.dist-info/METADATA +0 -71
- talks_reducer-0.4.1.dist-info/RECORD +0 -17
- {talks_reducer-0.4.1.dist-info → talks_reducer-0.5.1.dist-info}/WHEEL +0 -0
- {talks_reducer-0.4.1.dist-info → talks_reducer-0.5.1.dist-info}/licenses/LICENSE +0 -0
- {talks_reducer-0.4.1.dist-info → talks_reducer-0.5.1.dist-info}/top_level.txt +0 -0
talks_reducer/pipeline.py
CHANGED
@@ -27,7 +27,15 @@ from .progress import NullProgressReporter, ProgressReporter
|
|
27
27
|
|
28
28
|
def _input_to_output_filename(filename: Path, small: bool = False) -> Path:
|
29
29
|
dot_index = filename.name.rfind(".")
|
30
|
-
|
30
|
+
suffix_parts = []
|
31
|
+
|
32
|
+
if small:
|
33
|
+
suffix_parts.append("_small")
|
34
|
+
|
35
|
+
if not suffix_parts:
|
36
|
+
suffix_parts.append("") # Default case
|
37
|
+
|
38
|
+
suffix = "_speedup" + "".join(suffix_parts)
|
31
39
|
new_name = (
|
32
40
|
filename.name[:dot_index] + suffix + filename.name[dot_index:]
|
33
41
|
if dot_index != -1
|
@@ -38,7 +46,7 @@ def _input_to_output_filename(filename: Path, small: bool = False) -> Path:
|
|
38
46
|
|
39
47
|
def _create_path(path: Path) -> None:
|
40
48
|
try:
|
41
|
-
path.mkdir()
|
49
|
+
path.mkdir(parents=True, exist_ok=True)
|
42
50
|
except OSError as exc: # pragma: no cover - defensive logging
|
43
51
|
raise AssertionError(
|
44
52
|
"Creation of the directory failed. (The TEMP folder may already exist. Delete or rename it, and try again.)"
|
@@ -74,7 +82,7 @@ def _extract_video_metadata(input_file: Path, frame_rate: float) -> Dict[str, fl
|
|
74
82
|
"-select_streams",
|
75
83
|
"v",
|
76
84
|
"-show_entries",
|
77
|
-
"format=duration:stream=avg_frame_rate",
|
85
|
+
"format=duration:stream=avg_frame_rate,nb_frames",
|
78
86
|
]
|
79
87
|
process = subprocess.Popen(
|
80
88
|
command,
|
@@ -92,7 +100,14 @@ def _extract_video_metadata(input_file: Path, frame_rate: float) -> Dict[str, fl
|
|
92
100
|
match_duration = re.search(r"duration=([\d.]*)", str(stdout))
|
93
101
|
original_duration = float(match_duration.group(1)) if match_duration else 0.0
|
94
102
|
|
95
|
-
|
103
|
+
match_frames = re.search(r"nb_frames=(\d+)", str(stdout))
|
104
|
+
frame_count = int(match_frames.group(1)) if match_frames else 0
|
105
|
+
|
106
|
+
return {
|
107
|
+
"frame_rate": frame_rate,
|
108
|
+
"duration": original_duration,
|
109
|
+
"frame_count": frame_count,
|
110
|
+
}
|
96
111
|
|
97
112
|
|
98
113
|
def _ensure_two_dimensional(audio_data: np.ndarray) -> np.ndarray:
|
@@ -135,6 +150,18 @@ def speed_up_video(
|
|
135
150
|
metadata = _extract_video_metadata(input_path, options.frame_rate)
|
136
151
|
frame_rate = metadata["frame_rate"]
|
137
152
|
original_duration = metadata["duration"]
|
153
|
+
frame_count = metadata.get("frame_count", 0)
|
154
|
+
|
155
|
+
reporter.log(
|
156
|
+
(
|
157
|
+
"Source metadata — duration: {duration:.2f}s, frame rate: {fps:.3f} fps,"
|
158
|
+
" reported frames: {frames}"
|
159
|
+
).format(
|
160
|
+
duration=original_duration,
|
161
|
+
fps=frame_rate,
|
162
|
+
frames=frame_count if frame_count > 0 else "unknown",
|
163
|
+
)
|
164
|
+
)
|
138
165
|
|
139
166
|
reporter.log("Processing on: {}".format("GPU (CUDA)" if cuda_available else "CPU"))
|
140
167
|
if options.small:
|
@@ -148,10 +175,12 @@ def speed_up_video(
|
|
148
175
|
audio_bitrate = "128k" if options.small else "160k"
|
149
176
|
audio_wav = temp_path / "audio.wav"
|
150
177
|
|
178
|
+
extraction_sample_rate = options.sample_rate
|
179
|
+
|
151
180
|
extract_command = build_extract_audio_command(
|
152
181
|
os.fspath(input_path),
|
153
182
|
os.fspath(audio_wav),
|
154
|
-
|
183
|
+
extraction_sample_rate,
|
155
184
|
audio_bitrate,
|
156
185
|
hwaccel,
|
157
186
|
ffmpeg_path=ffmpeg_path,
|
@@ -159,10 +188,19 @@ def speed_up_video(
|
|
159
188
|
|
160
189
|
reporter.log("Extracting audio...")
|
161
190
|
process_callback = getattr(reporter, "process_callback", None)
|
191
|
+
estimated_total_frames = frame_count
|
192
|
+
if estimated_total_frames <= 0 and original_duration > 0 and frame_rate > 0:
|
193
|
+
estimated_total_frames = int(math.ceil(original_duration * frame_rate))
|
194
|
+
|
195
|
+
if estimated_total_frames > 0:
|
196
|
+
reporter.log(f"Extract audio target frames: {estimated_total_frames}")
|
197
|
+
else:
|
198
|
+
reporter.log("Extract audio target frames: unknown")
|
199
|
+
|
162
200
|
run_timed_ffmpeg_command(
|
163
201
|
extract_command,
|
164
202
|
reporter=reporter,
|
165
|
-
total=
|
203
|
+
total=estimated_total_frames if estimated_total_frames > 0 else None,
|
166
204
|
unit="frames",
|
167
205
|
desc="Extracting audio:",
|
168
206
|
process_callback=process_callback,
|
@@ -202,9 +240,11 @@ def speed_up_video(
|
|
202
240
|
)
|
203
241
|
|
204
242
|
audio_new_path = temp_path / "audioNew.wav"
|
243
|
+
# Use the sample rate that was actually used for processing
|
244
|
+
output_sample_rate = extraction_sample_rate
|
205
245
|
wavfile.write(
|
206
246
|
os.fspath(audio_new_path),
|
207
|
-
|
247
|
+
output_sample_rate,
|
208
248
|
_prepare_output_audio(output_audio_data),
|
209
249
|
)
|
210
250
|
|
@@ -246,10 +286,29 @@ def speed_up_video(
|
|
246
286
|
raise FileNotFoundError("Filter graph file was not generated")
|
247
287
|
|
248
288
|
try:
|
289
|
+
final_total_frames = updated_chunks[-1][3] if updated_chunks else 0
|
290
|
+
if final_total_frames > 0:
|
291
|
+
reporter.log(f"Final encode target frames: {final_total_frames}")
|
292
|
+
if frame_rate > 0:
|
293
|
+
final_duration_seconds = final_total_frames / frame_rate
|
294
|
+
reporter.log(
|
295
|
+
(
|
296
|
+
"Final encode target duration: {duration:.2f}s at {fps:.3f} fps"
|
297
|
+
).format(duration=final_duration_seconds, fps=frame_rate)
|
298
|
+
)
|
299
|
+
else:
|
300
|
+
reporter.log(
|
301
|
+
"Final encode target duration: unknown (missing frame rate)"
|
302
|
+
)
|
303
|
+
else:
|
304
|
+
reporter.log("Final encode target frames: unknown")
|
305
|
+
|
306
|
+
total_frames_arg = final_total_frames if final_total_frames > 0 else None
|
307
|
+
|
249
308
|
run_timed_ffmpeg_command(
|
250
309
|
command_str,
|
251
310
|
reporter=reporter,
|
252
|
-
total=
|
311
|
+
total=total_frames_arg,
|
253
312
|
unit="frames",
|
254
313
|
desc="Generating final:",
|
255
314
|
process_callback=process_callback,
|
@@ -257,10 +316,25 @@ def speed_up_video(
|
|
257
316
|
except subprocess.CalledProcessError as exc:
|
258
317
|
if fallback_command_str and use_cuda_encoder:
|
259
318
|
reporter.log("CUDA encoding failed, retrying with CPU encoder...")
|
319
|
+
if final_total_frames > 0:
|
320
|
+
reporter.log(
|
321
|
+
f"Final encode target frames (fallback): {final_total_frames}"
|
322
|
+
)
|
323
|
+
else:
|
324
|
+
reporter.log("Final encode target frames (fallback): unknown")
|
325
|
+
if final_total_frames > 0 and frame_rate > 0:
|
326
|
+
reporter.log(
|
327
|
+
(
|
328
|
+
"Final encode target duration (fallback): {duration:.2f}s at {fps:.3f} fps"
|
329
|
+
).format(
|
330
|
+
duration=final_total_frames / frame_rate,
|
331
|
+
fps=frame_rate,
|
332
|
+
)
|
333
|
+
)
|
260
334
|
run_timed_ffmpeg_command(
|
261
335
|
fallback_command_str,
|
262
336
|
reporter=reporter,
|
263
|
-
total=
|
337
|
+
total=total_frames_arg,
|
264
338
|
unit="frames",
|
265
339
|
desc="Generating final (fallback):",
|
266
340
|
process_callback=process_callback,
|
talks_reducer/server.py
ADDED
@@ -0,0 +1,354 @@
|
|
1
|
+
"""Gradio-powered simple server for running Talks Reducer in a browser."""
|
2
|
+
|
3
|
+
from __future__ import annotations
|
4
|
+
|
5
|
+
import argparse
|
6
|
+
import atexit
|
7
|
+
import shutil
|
8
|
+
import tempfile
|
9
|
+
from contextlib import AbstractContextManager, suppress
|
10
|
+
from pathlib import Path
|
11
|
+
from typing import Callable, Optional, Sequence
|
12
|
+
|
13
|
+
import gradio as gr
|
14
|
+
|
15
|
+
from talks_reducer.ffmpeg import FFmpegNotFoundError
|
16
|
+
from talks_reducer.models import ProcessingOptions, ProcessingResult
|
17
|
+
from talks_reducer.pipeline import speed_up_video
|
18
|
+
from talks_reducer.progress import ProgressHandle, SignalProgressReporter
|
19
|
+
|
20
|
+
|
21
|
+
class _GradioProgressHandle(AbstractContextManager[ProgressHandle]):
    """Bridge pipeline progress updates into Gradio progress callbacks.

    One handle tracks a single FFmpeg stage (description, unit, optional
    total) and forwards every change to the owning reporter.
    """

    def __init__(
        self,
        reporter: "GradioProgressReporter",
        *,
        desc: str,
        total: Optional[int],
        unit: str,
    ) -> None:
        self._reporter = reporter
        self._desc = desc.strip() or "Processing"
        self._unit = unit
        self._total = total
        self._current = 0
        # Announce the new stage immediately so the UI resets to zero.
        self._reporter._start_task(self._desc, self._total)

    @property
    def current(self) -> int:
        """Return the number of processed units reported so far."""
        return self._current

    def ensure_total(self, total: int) -> None:
        """Update the total units when FFmpeg discovers a larger frame count."""
        if total <= 0:
            return
        if self._total is not None and total <= self._total:
            return
        self._total = total
        self._reporter._update_progress(self._current, self._total, self._desc)

    def advance(self, amount: int) -> None:
        """Advance the current progress and notify the UI."""
        if amount <= 0:
            return
        self._current += amount
        self._reporter._update_progress(self._current, self._total, self._desc)

    def finish(self) -> None:
        """Fill the progress bar when FFmpeg completes."""
        if self._total is None:
            # Without a known total, treat the final frame count as the total
            # so the progress bar reaches 100%; report at least one unit.
            inferred_total = self._current if self._current > 0 else 1
            self._reporter._update_progress(self._current, inferred_total, self._desc)
            return
        self._current = self._total
        self._reporter._update_progress(self._current, self._total, self._desc)

    def __enter__(self) -> "_GradioProgressHandle":
        return self

    def __exit__(self, exc_type, exc, tb) -> bool:
        # Only mark the bar complete on a clean exit; never swallow exceptions.
        if exc_type is None:
            self.finish()
        return False
|
80
|
+
|
81
|
+
|
82
|
+
class GradioProgressReporter(SignalProgressReporter):
    """Progress reporter that forwards updates to Gradio's progress widget.

    Also buffers log lines (capped at *max_log_lines*) so the web UI can
    display the pipeline output once processing finishes.
    """

    def __init__(
        self,
        progress_callback: Optional[Callable[[int, int, str], None]] = None,
        *,
        max_log_lines: int = 500,
    ) -> None:
        super().__init__()
        self._progress_callback = progress_callback
        self._max_log_lines = max_log_lines
        self._active_desc = "Processing"
        # Rolling buffer of log lines shown in the web interface.
        self.logs: list[str] = []

    def log(self, message: str) -> None:
        """Collect log messages for display in the web interface."""
        stripped = message.strip()
        if not stripped:
            return
        self.logs.append(stripped)
        overflow = len(self.logs) - self._max_log_lines
        if overflow > 0:
            # Drop the oldest lines so the buffer stays bounded.
            self.logs = self.logs[overflow:]

    def task(
        self,
        *,
        desc: str = "",
        total: Optional[int] = None,
        unit: str = "",
    ) -> AbstractContextManager[ProgressHandle]:
        """Create a context manager bridging pipeline progress to Gradio."""
        return _GradioProgressHandle(self, desc=desc, total=total, unit=unit)

    # Internal helpers -------------------------------------------------

    def _start_task(self, desc: str, total: Optional[int]) -> None:
        # Remember the active description so later updates without one reuse it.
        self._active_desc = desc or "Processing"
        self._update_progress(0, total, self._active_desc)

    def _update_progress(
        self, current: int, total: Optional[int], desc: Optional[str]
    ) -> None:
        if self._progress_callback is None:
            return
        if total is None or total <= 0:
            # Unknown total: report one more unit than completed so the bar
            # stays visibly short of 100% until finish() fills it.
            shown_total = max(1, int(current) + 1 if current >= 0 else 1)
            shown_current = max(0, int(current))
        else:
            shown_total = max(int(total), 1, int(current))
            shown_current = max(0, min(int(current), int(shown_total)))
        self._progress_callback(shown_current, shown_total, desc or self._active_desc)
|
137
|
+
|
138
|
+
|
139
|
+
# Workspaces created for individual requests; removed by _cleanup_workspaces().
_WORKSPACES: list[Path] = []


def _allocate_workspace() -> Path:
    """Create and remember a workspace directory for a single request."""
    workspace = Path(tempfile.mkdtemp(prefix="talks_reducer_web_"))
    _WORKSPACES.append(workspace)
    return workspace
|
148
|
+
|
149
|
+
|
150
|
+
def _cleanup_workspaces() -> None:
    """Remove any workspaces that remain when the process exits."""
    # Drain the registry; deletion is best-effort since files may be locked.
    while _WORKSPACES:
        workspace = _WORKSPACES.pop()
        if workspace.exists():
            with suppress(Exception):
                shutil.rmtree(workspace)
|
158
|
+
|
159
|
+
|
160
|
+
def _build_output_path(input_path: Path, workspace: Path, small: bool) -> Path:
    """Mirror the CLI output naming scheme inside the workspace directory."""
    marker = "_speedup_small" if small else "_speedup"
    # Fall back to .mp4 when the upload arrived without an extension.
    extension = input_path.suffix or ".mp4"
    return workspace / (input_path.stem + marker + extension)
|
167
|
+
|
168
|
+
|
169
|
+
def _format_duration(seconds: float) -> str:
    """Return a compact human-readable duration string (e.g. ``1h 2m 3s``)."""
    if seconds <= 0:
        return "0s"
    whole = int(round(seconds))
    hours = whole // 3600
    minutes = (whole % 3600) // 60
    secs = whole % 60
    pieces: list[str] = []
    if hours:
        pieces.append(f"{hours}h")
    # Show minutes whenever hours are shown, even if zero.
    if hours or minutes:
        pieces.append(f"{minutes}m")
    pieces.append(f"{secs}s")
    return " ".join(pieces)
|
184
|
+
|
185
|
+
|
186
|
+
def _format_summary(result: ProcessingResult) -> str:
    """Produce a Markdown summary of the processing result."""
    summary_lines = [
        f"**Input:** `{result.input_file.name}`",
        f"**Output:** `{result.output_file.name}`",
    ]

    duration_line = (
        f"**Duration:** {_format_duration(result.output_duration)}"
        f" ({_format_duration(result.original_duration)} original)"
    )
    if result.time_ratio is not None:
        duration_line += f" — {result.time_ratio * 100:.1f}% of the original"
    summary_lines.append(duration_line)

    if result.size_ratio is not None:
        summary_lines.append(
            f"**Size:** {result.size_ratio * 100:.1f}% of the original file"
        )

    summary_lines.append(f"**Chunks merged:** {result.chunk_count}")
    summary_lines.append(f"**Encoder:** {'CUDA' if result.used_cuda else 'CPU'}")

    return "\n".join(summary_lines)
|
210
|
+
|
211
|
+
|
212
|
+
def process_video(
    file_path: Optional[str],
    small_video: bool,
    progress: Optional[gr.Progress] = gr.Progress(track_tqdm=False),
) -> tuple[Optional[str], str, str, Optional[str]]:
    """Run the Talks Reducer pipeline for a single uploaded file.

    Returns a ``(video path, log text, Markdown summary, download path)``
    tuple matching the outputs wired up in :func:`build_interface`.
    """
    if not file_path:
        raise gr.Error("Please upload a video file to begin processing.")

    input_path = Path(file_path)
    if not input_path.exists():
        raise gr.Error("The uploaded file is no longer available on the server.")

    workspace = _allocate_workspace()
    output_file = _build_output_path(input_path, workspace, small_video)

    progress_callback: Optional[Callable[[int, int, str], None]] = None
    if progress is not None:

        def _callback(current: int, total: int, desc: str) -> None:
            progress(current, total=total, desc=desc)

        progress_callback = _callback

    reporter = GradioProgressReporter(progress_callback=progress_callback)
    options = ProcessingOptions(
        input_file=input_path,
        output_file=output_file,
        temp_folder=workspace / "temp",
        small=small_video,
    )

    try:
        result = speed_up_video(options, reporter=reporter)
    except (FFmpegNotFoundError, FileNotFoundError) as exc:
        # Surface setup problems (missing ffmpeg, vanished input) verbatim.
        raise gr.Error(str(exc)) from exc
    except Exception as exc:  # pragma: no cover - defensive fallback
        reporter.log(f"Error: {exc}")
        raise gr.Error(f"Failed to process the video: {exc}") from exc

    reporter.log("Processing complete.")

    output_str = str(result.output_file)
    return (
        output_str,
        "\n".join(reporter.logs),
        _format_summary(result),
        output_str,
    )
|
267
|
+
|
268
|
+
|
269
|
+
def build_interface() -> gr.Blocks:
    """Construct the Gradio Blocks application for the simple web UI."""
    with gr.Blocks(title="Talks Reducer Web UI") as demo:
        gr.Markdown(
            """
            ## Talks Reducer — Simple Server
            Drop a video into the zone below or click to browse. Toggle **Small video** to
            apply the 720p/128k preset before processing starts.
            """.strip()
        )

        with gr.Row():
            file_input = gr.File(
                label="Video file",
                file_types=["video"],
                type="filepath",
            )
            small_checkbox = gr.Checkbox(label="Small video", value=False)

        video_output = gr.Video(label="Processed video")
        summary_output = gr.Markdown()
        download_output = gr.File(label="Download processed file", interactive=False)
        log_output = gr.Textbox(label="Log", lines=12, interactive=False)

        # Kick off processing as soon as a file lands in the drop zone.
        file_input.upload(
            process_video,
            inputs=[file_input, small_checkbox],
            outputs=[video_output, log_output, summary_output, download_output],
            queue=True,
            api_name="process_video",
        )

    # Serialize jobs: one FFmpeg run already saturates the machine.
    demo.queue(default_concurrency_limit=1)
    return demo
|
304
|
+
|
305
|
+
|
306
|
+
def main(argv: Optional[Sequence[str]] = None) -> None:
    """Launch the Gradio server from the command line.

    Args:
        argv: Optional argument list; defaults to ``sys.argv[1:]``.
    """
    parser = argparse.ArgumentParser(description="Launch the Talks Reducer web UI.")
    parser.add_argument(
        "--host", dest="host", default=None, help="Custom host to bind."
    )
    parser.add_argument(
        "--port",
        dest="port",
        type=int,
        default=9005,
        help="Port number for the web server (default: 9005).",
    )
    parser.add_argument(
        "--share",
        action="store_true",
        help="Create a temporary public Gradio link.",
    )
    parser.add_argument(
        "--no-browser",
        action="store_true",
        help="Do not automatically open the browser window.",
    )
    args = parser.parse_args(argv)

    build_interface().launch(
        server_name=args.host,
        server_port=args.port,
        share=args.share,
        inbrowser=not args.no_browser,
    )
|
340
|
+
|
341
|
+
|
342
|
+
# Best-effort removal of leftover per-request workspaces at interpreter exit.
atexit.register(_cleanup_workspaces)


# Public API of this module.
__all__ = [
    "GradioProgressReporter",
    "build_interface",
    "main",
    "process_video",
]


if __name__ == "__main__":  # pragma: no cover - convenience entry point
    main()
|
@@ -0,0 +1,102 @@
|
|
1
|
+
"""Command-line helper for sending videos to the Talks Reducer server."""
|
2
|
+
|
3
|
+
from __future__ import annotations
|
4
|
+
|
5
|
+
import argparse
|
6
|
+
import shutil
|
7
|
+
from pathlib import Path
|
8
|
+
from typing import Optional, Sequence, Tuple
|
9
|
+
|
10
|
+
from gradio_client import Client
|
11
|
+
from gradio_client import file as gradio_file
|
12
|
+
|
13
|
+
|
14
|
+
def send_video(
    input_path: Path,
    output_path: Optional[Path],
    server_url: str,
    small: bool = False,
) -> Tuple[Path, str, str]:
    """Upload *input_path* to the Gradio server and download the processed video.

    Returns a ``(destination, summary, log_text)`` tuple.

    Raises:
        FileNotFoundError: If *input_path* does not exist.
        RuntimeError: If the server response has an unexpected shape or lacks
            a processed file.
    """
    if not input_path.exists():
        raise FileNotFoundError(f"Input file does not exist: {input_path}")

    prediction = Client(server_url).predict(
        gradio_file(str(input_path)),
        bool(small),
        api_name="/process_video",
    )

    try:
        _video, log_text, summary, download_path = prediction
    except (TypeError, ValueError) as exc:  # pragma: no cover - defensive
        raise RuntimeError("Unexpected response from server") from exc

    if not download_path:
        raise RuntimeError("Server did not return a processed file")

    download_source = Path(str(download_path))
    if output_path is None:
        destination = Path.cwd() / download_source.name
    elif output_path.is_dir():
        destination = output_path / download_source.name
    else:
        destination = output_path

    destination.parent.mkdir(parents=True, exist_ok=True)
    # Skip the copy when the server already wrote to the requested location.
    if download_source.resolve() != destination.resolve():
        shutil.copy2(download_source, destination)

    return destination, summary, log_text
|
53
|
+
|
54
|
+
|
55
|
+
def _build_parser() -> argparse.ArgumentParser:
    """Build the argument parser for the service client CLI."""
    parser = argparse.ArgumentParser(
        description="Send a video to a running talks-reducer server and download the result.",
    )
    parser.add_argument("input", type=Path, help="Path to the video file to upload.")
    parser.add_argument(
        "--server",
        default="http://127.0.0.1:9005/",
        help="Base URL for the talks-reducer server (default: http://127.0.0.1:9005/).",
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=None,
        help="Where to store the processed video. Defaults to the working directory.",
    )
    parser.add_argument(
        "--small",
        action="store_true",
        help="Toggle the 'Small video' preset before processing.",
    )
    parser.add_argument(
        "--print-log",
        action="store_true",
        help="Print the server log after processing completes.",
    )
    return parser
|
82
|
+
|
83
|
+
|
84
|
+
def main(argv: Optional[Sequence[str]] = None) -> None:
    """Parse CLI arguments, upload the video, and report the outcome."""
    args = _build_parser().parse_args(argv)

    output_arg = args.output.expanduser() if args.output else None
    destination, summary, log_text = send_video(
        input_path=args.input.expanduser(),
        output_path=output_arg,
        server_url=args.server,
        small=args.small,
    )

    print(summary)
    print(f"Saved processed video to {destination}")
    if args.print_log:
        print("\nServer log:\n" + log_text)


if __name__ == "__main__":  # pragma: no cover
    main()
|