talks-reducer 0.5.2__tar.gz → 0.5.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {talks_reducer-0.5.2/talks_reducer.egg-info → talks_reducer-0.5.5}/PKG-INFO +19 -1
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/README.md +18 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/__about__.py +1 -1
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/cli.py +91 -5
- talks_reducer-0.5.5/talks_reducer/discovery.py +119 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/gui.py +234 -6
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/server.py +1 -1
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/server_tray.py +1 -1
- {talks_reducer-0.5.2 → talks_reducer-0.5.5/talks_reducer.egg-info}/PKG-INFO +19 -1
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer.egg-info/SOURCES.txt +2 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/tests/test_cli.py +45 -0
- talks_reducer-0.5.5/tests/test_discovery.py +52 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/LICENSE +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/pyproject.toml +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/setup.cfg +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/__init__.py +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/__main__.py +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/audio.py +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/chunks.py +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/ffmpeg.py +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/models.py +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/pipeline.py +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/progress.py +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/service_client.py +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer.egg-info/dependency_links.txt +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer.egg-info/entry_points.txt +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer.egg-info/requires.txt +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer.egg-info/top_level.txt +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/tests/test_audio.py +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/tests/test_pipeline_service.py +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/tests/test_server.py +0 -0
- {talks_reducer-0.5.2 → talks_reducer-0.5.5}/tests/test_service_client.py +0 -0
{talks_reducer-0.5.2/talks_reducer.egg-info → talks_reducer-0.5.5}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: talks-reducer
-Version: 0.5.2
+Version: 0.5.5
 Summary: CLI for speeding up long-form talks by removing silence
 Author: Talks Reducer Maintainers
 License-Expression: MIT
@@ -70,6 +70,14 @@ Example CLI usage:
 talks-reducer --small input.mp4
 ```
 
+Need to offload work to a remote Talks Reducer server? Pass `--url` with the
+server address and the CLI will upload the input, wait for processing to finish,
+and download the rendered video:
+
+```sh
+talks-reducer --url http://localhost:9005 demo.mp4
+```
+
 ### Speech detection
 
 Talks Reducer now relies on its built-in volume thresholding to detect speech. Adjust `--silent_threshold` if you need to fine-tune when segments count as silence. Dropping the optional Silero VAD integration keeps the install lightweight and avoids pulling in PyTorch.
@@ -91,6 +99,10 @@ Want the server to live in your system tray instead of a terminal window? Use:
 talks-reducer server-tray
 ```
 
+Bundled Windows builds include the same behaviour: run
+`talks-reducer.exe --server` to launch the tray-managed server directly from the
+desktop shortcut without opening the GUI first.
+
 Pass `--debug` to print verbose logs about the tray icon lifecycle, and
 `--tray-mode pystray-detached` to try pystray's alternate detached runner. If
 the icon backend refuses to appear, fall back to `--tray-mode headless` to keep
@@ -111,6 +123,12 @@ This opens a local web page featuring a drag-and-drop upload zone, a **Small vid
 progress indicator, and automatic previews of the processed output. Once the job completes you can inspect the resulting compression
 ratio and download the rendered video directly from the page.
 
+The desktop GUI mirrors this behaviour. Open **Advanced** settings to provide a
+server URL and click **Discover** to scan your local network for Talks Reducer
+instances listening on port `9005`. Leaving the field blank keeps processing
+local; selecting a discovered server delegates rendering to that machine while
+the GUI downloads the finished files automatically.
+
 ### Uploading and retrieving a processed video
 
 1. Open the printed `http://localhost:<port>` address (the default port is `9005`).

{talks_reducer-0.5.2 → talks_reducer-0.5.5}/README.md

@@ -42,6 +42,14 @@ Example CLI usage:
 talks-reducer --small input.mp4
 ```
 
+Need to offload work to a remote Talks Reducer server? Pass `--url` with the
+server address and the CLI will upload the input, wait for processing to finish,
+and download the rendered video:
+
+```sh
+talks-reducer --url http://localhost:9005 demo.mp4
+```
+
 ### Speech detection
 
 Talks Reducer now relies on its built-in volume thresholding to detect speech. Adjust `--silent_threshold` if you need to fine-tune when segments count as silence. Dropping the optional Silero VAD integration keeps the install lightweight and avoids pulling in PyTorch.
@@ -63,6 +71,10 @@ Want the server to live in your system tray instead of a terminal window? Use:
 talks-reducer server-tray
 ```
 
+Bundled Windows builds include the same behaviour: run
+`talks-reducer.exe --server` to launch the tray-managed server directly from the
+desktop shortcut without opening the GUI first.
+
 Pass `--debug` to print verbose logs about the tray icon lifecycle, and
 `--tray-mode pystray-detached` to try pystray's alternate detached runner. If
 the icon backend refuses to appear, fall back to `--tray-mode headless` to keep
@@ -83,6 +95,12 @@ This opens a local web page featuring a drag-and-drop upload zone, a **Small vid
 progress indicator, and automatic previews of the processed output. Once the job completes you can inspect the resulting compression
 ratio and download the rendered video directly from the page.
 
+The desktop GUI mirrors this behaviour. Open **Advanced** settings to provide a
+server URL and click **Discover** to scan your local network for Talks Reducer
+instances listening on port `9005`. Leaving the field blank keeps processing
+local; selecting a discovered server delegates rendering to that machine while
+the GUI downloads the finished files automatically.
+
 ### Uploading and retrieving a processed video
 
 1. Open the printed `http://localhost:<port>` address (the default port is `9005`).

{talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/cli.py

@@ -98,6 +98,12 @@ def _build_parser() -> argparse.ArgumentParser:
         action="store_true",
         help="Apply small file optimizations: resize video to 720p, audio to 128k bitrate, best compression (uses CUDA if available).",
     )
+    parser.add_argument(
+        "--url",
+        dest="server_url",
+        default=None,
+        help="Process videos via a Talks Reducer server at the provided base URL (for example, http://localhost:9005).",
+    )
     return parser
 
 
@@ -128,6 +134,86 @@ def gather_input_files(paths: List[str]) -> List[str]:
     return files
 
 
+def _print_total_time(start_time: float) -> None:
+    """Print the elapsed processing time since *start_time*."""
+
+    end_time = time.time()
+    total_time = end_time - start_time
+    hours, remainder = divmod(total_time, 3600)
+    minutes, seconds = divmod(remainder, 60)
+    print(f"\nTime: {int(hours)}h {int(minutes)}m {seconds:.2f}s")
+
+
+def _process_via_server(
+    files: Sequence[str], parsed_args: argparse.Namespace, *, start_time: float
+) -> None:
+    """Upload *files* to the configured server and download the results."""
+
+    try:
+        from . import service_client
+    except ImportError as exc:  # pragma: no cover - optional dependency guard
+        print(
+            "Server mode requires the gradio_client dependency." f" ({exc})",
+            file=sys.stderr,
+        )
+        sys.exit(1)
+
+    server_url = parsed_args.server_url
+    if not server_url:
+        return
+
+    output_override: Optional[Path] = None
+    if parsed_args.output_file and len(files) == 1:
+        output_override = Path(parsed_args.output_file).expanduser()
+    elif parsed_args.output_file and len(files) > 1:
+        print(
+            "Warning: --output is ignored when processing multiple files via the server.",
+            file=sys.stderr,
+        )
+
+    unsupported_options = []
+    for name in (
+        "silent_threshold",
+        "silent_speed",
+        "sounded_speed",
+        "frame_spreadage",
+        "sample_rate",
+        "temp_folder",
+    ):
+        if getattr(parsed_args, name) is not None:
+            unsupported_options.append(f"--{name.replace('_', '-')}")
+
+    if unsupported_options:
+        print(
+            "Warning: the following options are ignored when using --url: "
+            + ", ".join(sorted(unsupported_options)),
+            file=sys.stderr,
+        )
+
+    for index, file in enumerate(files, start=1):
+        basename = os.path.basename(file)
+        print(
+            f"Processing file {index}/{len(files)} '{basename}' via server {server_url}"
+        )
+        try:
+            destination, summary, log_text = service_client.send_video(
+                input_path=Path(file),
+                output_path=output_override,
+                server_url=server_url,
+                small=bool(parsed_args.small),
+            )
+        except Exception as exc:  # pragma: no cover - network failure safeguard
+            print(f"Failed to process {basename}: {exc}", file=sys.stderr)
+            sys.exit(1)
+
+        print(summary)
+        print(f"Saved processed video to {destination}")
+        if log_text.strip():
+            print("\nServer log:\n" + log_text)
+
+    _print_total_time(start_time)
+
+
 def _launch_gui(argv: Sequence[str]) -> bool:
     """Attempt to launch the GUI with the provided arguments."""
 
@@ -198,6 +284,10 @@ def main(argv: Optional[Sequence[str]] = None) -> None:
     if len(files) > 1 and "output_file" in args:
         del args["output_file"]
 
+    if parsed_args.server_url:
+        _process_via_server(files, parsed_args, start_time=start_time)
+        return
+
     reporter = TqdmProgressReporter()
 
     for index, file in enumerate(files):
@@ -241,11 +331,7 @@ def main(argv: Optional[Sequence[str]] = None) -> None:
         if summary_parts:
             reporter.log("Result: " + ", ".join(summary_parts))
 
-    end_time = time.time()
-    total_time = end_time - start_time
-    hours, remainder = divmod(total_time, 3600)
-    minutes, seconds = divmod(remainder, 60)
-    print(f"\nTime: {int(hours)}h {int(minutes)}m {seconds:.2f}s")
+    _print_total_time(start_time)
 
 
 if __name__ == "__main__":

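The `--url` code path above reduces to one call per input file into `service_client.send_video`. As a hedged illustration only (the `service_client` module itself is unchanged in this release and not shown in this diff), here is a minimal sketch of that call; the keyword arguments and the three-part return value are taken from `_process_via_server` above:

```python
# Sketch of the remote call the CLI makes for each file when --url is set.
# The send_video signature is inferred from the calls shown in the diff above.
from pathlib import Path

from talks_reducer import service_client

destination, summary, log_text = service_client.send_video(
    input_path=Path("demo.mp4"),
    output_path=None,  # None lets the client choose the output name
    server_url="http://localhost:9005",
    small=True,  # same effect as `talks-reducer --small`
)
print(summary)
print(f"Saved processed video to {destination}")
```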
talks_reducer-0.5.5/talks_reducer/discovery.py

@@ -0,0 +1,119 @@
+"""Utilities for discovering Talks Reducer servers on the local network."""
+
+from __future__ import annotations
+
+import ipaddress
+import socket
+from concurrent.futures import ThreadPoolExecutor
+from contextlib import closing
+from http.client import HTTPConnection
+from typing import Iterable, Iterator, List, Optional, Set
+
+DEFAULT_PORT = 9005
+DEFAULT_TIMEOUT = 0.4
+
+
+def _iter_local_ipv4_addresses() -> Iterator[str]:
+    """Yield IPv4 addresses that belong to the local machine."""
+
+    seen: Set[str] = set()
+
+    try:
+        hostname = socket.gethostname()
+        for info in socket.getaddrinfo(hostname, None, family=socket.AF_INET):
+            address = info[4][0]
+            if address and address not in seen:
+                seen.add(address)
+                yield address
+    except socket.gaierror:
+        pass
+
+    for probe in ("8.8.8.8", "1.1.1.1"):
+        try:
+            with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
+                sock.connect((probe, 80))
+                address = sock.getsockname()[0]
+            if address and address not in seen:
+                seen.add(address)
+                yield address
+        except OSError:
+            continue
+
+
+def _build_default_host_candidates(prefix_length: int = 24) -> List[str]:
+    """Return a list of host candidates based on detected local networks."""
+
+    hosts: Set[str] = {"127.0.0.1", "localhost"}
+
+    for address in _iter_local_ipv4_addresses():
+        hosts.add(address)
+        try:
+            network = ipaddress.ip_network(f"{address}/{prefix_length}", strict=False)
+        except ValueError:
+            continue
+        for host in network.hosts():
+            hosts.add(str(host))
+
+    return sorted(hosts)
+
+
+def _probe_host(host: str, port: int, timeout: float) -> Optional[str]:
+    """Return the URL if *host* responds on *port* within *timeout* seconds."""
+
+    connection: Optional[HTTPConnection] = None
+    try:
+        connection = HTTPConnection(host, port, timeout=timeout)
+        connection.request(
+            "GET", "/", headers={"User-Agent": "talks-reducer-discovery"}
+        )
+        response = connection.getresponse()
+        # Drain the response to avoid ResourceWarning in some Python versions.
+        response.read()
+        if 200 <= response.status < 500:
+            return f"http://{host}:{port}/"
+    except OSError:
+        return None
+    finally:
+        if connection is not None:
+            try:
+                connection.close()
+            except Exception:
+                pass
+    return None
+
+
+def discover_servers(
+    *,
+    port: int = DEFAULT_PORT,
+    timeout: float = DEFAULT_TIMEOUT,
+    hosts: Optional[Iterable[str]] = None,
+) -> List[str]:
+    """Scan *hosts* for running Talks Reducer servers on *port*.
+
+    When *hosts* is omitted, the local /24 networks derived from available IPv4
+    addresses are scanned. ``localhost`` and ``127.0.0.1`` are always included.
+    The function returns a sorted list of unique base URLs.
+    """
+
+    if hosts is None:
+        candidates = _build_default_host_candidates()
+    else:
+        candidates = sorted(set(hosts))
+        if "127.0.0.1" not in candidates:
+            candidates.append("127.0.0.1")
+        if "localhost" not in candidates:
+            candidates.append("localhost")
+
+    results: List[str] = []
+
+    with ThreadPoolExecutor(max_workers=32) as executor:
+        for url in executor.map(
+            lambda host: _probe_host(host, port, timeout), candidates
+        ):
+            if url and url not in results:
+                results.append(url)
+
+    return results
+
+
+__all__ = ["discover_servers"]

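For orientation, a minimal usage sketch of the new helper, assuming the module above is importable as `talks_reducer.discovery`; `discover_servers` takes only keyword arguments, and passing an explicit `hosts` list skips the /24 network scan:

```python
# Probe the default port 9005 across the detected local /24 networks,
# then retry a single known machine with a longer per-host timeout.
# The host 192.168.1.50 is a hypothetical example address.
from talks_reducer.discovery import discover_servers

urls = discover_servers()  # localhost plus detected local networks, 0.4 s per host
if not urls:
    urls = discover_servers(hosts=["192.168.1.50"], timeout=1.0)

for url in urls:
    print(f"Talks Reducer server found at {url}")
```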
{talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/gui.py

@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import argparse
+import importlib
 import json
 import os
 import re
@@ -20,6 +21,7 @@ if TYPE_CHECKING:
 try:
     from .cli import gather_input_files
     from .cli import main as cli_main
+    from .discovery import discover_servers
     from .ffmpeg import FFmpegNotFoundError
     from .models import ProcessingOptions, default_temp_folder
     from .pipeline import speed_up_video
@@ -34,6 +36,7 @@ except ImportError:  # pragma: no cover - handled at runtime
 
     from talks_reducer.cli import gather_input_files
     from talks_reducer.cli import main as cli_main
+    from talks_reducer.discovery import discover_servers
     from talks_reducer.ffmpeg import FFmpegNotFoundError
     from talks_reducer.models import ProcessingOptions, default_temp_folder
     from talks_reducer.pipeline import speed_up_video
@@ -356,6 +359,11 @@ class TalksReducerGUI:
         self.open_after_convert_var.trace_add(
             "write", self._on_open_after_convert_change
         )
+        self.server_url_var = tk.StringVar(
+            value=str(self._get_setting("server_url", ""))
+        )
+        self.server_url_var.trace_add("write", self._on_server_url_change)
+        self._discovery_thread: Optional[threading.Thread] = None
 
         self._build_layout()
         self._apply_simple_mode(initial=True)
@@ -538,11 +546,23 @@ class TalksReducerGUI:
         self.sample_rate_var = self.tk.StringVar(value="48000")
         self._add_entry(self.advanced_frame, "Sample rate", self.sample_rate_var, row=6)
 
+        self.ttk.Label(self.advanced_frame, text="Server URL").grid(
+            row=7, column=0, sticky="w", pady=4
+        )
+        self.server_url_entry = self.ttk.Entry(
+            self.advanced_frame, textvariable=self.server_url_var
+        )
+        self.server_url_entry.grid(row=7, column=1, sticky="ew", pady=4)
+        self.server_discover_button = self.ttk.Button(
+            self.advanced_frame, text="Discover", command=self._start_discovery
+        )
+        self.server_discover_button.grid(row=7, column=2, padx=(8, 0))
+
         self.ttk.Label(self.advanced_frame, text="Theme").grid(
-            row=7, column=0, sticky="w", pady=(8, 0)
+            row=8, column=0, sticky="w", pady=(8, 0)
         )
         theme_choice = self.ttk.Frame(self.advanced_frame)
-        theme_choice.grid(row=7, column=1, columnspan=2, sticky="w", pady=(8, 0))
+        theme_choice.grid(row=8, column=1, columnspan=2, sticky="w", pady=(8, 0))
         for value, label in ("os", "OS"), ("light", "Light"), ("dark", "Dark"):
             self.ttk.Radiobutton(
                 theme_choice,
@@ -644,6 +664,110 @@ class TalksReducerGUI:
         )
         button.grid(row=row, column=2, padx=(8, 0))
 
+    def _start_discovery(self) -> None:
+        """Search the local network for running Talks Reducer servers."""
+
+        if self._discovery_thread and self._discovery_thread.is_alive():
+            return
+
+        self.server_discover_button.configure(
+            state=self.tk.DISABLED, text="Discovering…"
+        )
+        self._append_log("Discovering Talks Reducer servers on port 9005…")
+
+        def worker() -> None:
+            try:
+                urls = discover_servers()
+            except Exception as exc:  # pragma: no cover - network failure safeguard
+                self._notify(lambda: self._on_discovery_failed(exc))
+                return
+            self._notify(lambda: self._on_discovery_complete(urls))
+
+        self._discovery_thread = threading.Thread(target=worker, daemon=True)
+        self._discovery_thread.start()
+
+    def _on_discovery_failed(self, exc: Exception) -> None:
+        self.server_discover_button.configure(state=self.tk.NORMAL, text="Discover")
+        message = f"Discovery failed: {exc}"
+        self._append_log(message)
+        self.messagebox.showerror("Discovery failed", message)
+
+    def _on_discovery_complete(self, urls: List[str]) -> None:
+        self.server_discover_button.configure(state=self.tk.NORMAL, text="Discover")
+        if not urls:
+            self._append_log("No Talks Reducer servers were found.")
+            self.messagebox.showinfo(
+                "No servers found",
+                "No Talks Reducer servers responded on port 9005.",
+            )
+            return
+
+        self._append_log(
+            f"Discovered {len(urls)} server{'s' if len(urls) != 1 else ''}."
+        )
+
+        if len(urls) == 1:
+            self.server_url_var.set(urls[0])
+            return
+
+        self._show_discovery_results(urls)
+
+    def _show_discovery_results(self, urls: List[str]) -> None:
+        dialog = self.tk.Toplevel(self.root)
+        dialog.title("Select server")
+        dialog.transient(self.root)
+        dialog.grab_set()
+
+        self.ttk.Label(dialog, text="Select a Talks Reducer server:").grid(
+            row=0, column=0, columnspan=2, sticky="w", padx=self.PADDING, pady=(12, 4)
+        )
+
+        listbox = self.tk.Listbox(
+            dialog,
+            height=min(10, len(urls)),
+            selectmode=self.tk.SINGLE,
+        )
+        listbox.grid(
+            row=1,
+            column=0,
+            columnspan=2,
+            padx=self.PADDING,
+            sticky="nsew",
+        )
+        dialog.columnconfigure(0, weight=1)
+        dialog.columnconfigure(1, weight=1)
+        dialog.rowconfigure(1, weight=1)
+
+        for url in urls:
+            listbox.insert(self.tk.END, url)
+        listbox.select_set(0)
+
+        def choose(_: object | None = None) -> None:
+            selection = listbox.curselection()
+            if not selection:
+                return
+            index = selection[0]
+            self.server_url_var.set(urls[index])
+            dialog.grab_release()
+            dialog.destroy()
+
+        def cancel() -> None:
+            dialog.grab_release()
+            dialog.destroy()
+
+        listbox.bind("<Double-Button-1>", choose)
+        listbox.bind("<Return>", choose)
+
+        button_frame = self.ttk.Frame(dialog)
+        button_frame.grid(row=2, column=0, columnspan=2, pady=(8, 12))
+        self.ttk.Button(button_frame, text="Use server", command=choose).pack(
+            side=self.tk.LEFT, padx=(0, 8)
+        )
+        self.ttk.Button(button_frame, text="Cancel", command=cancel).pack(
+            side=self.tk.LEFT
+        )
+        dialog.protocol("WM_DELETE_WINDOW", cancel)
+
     def _toggle_simple_mode(self) -> None:
         self._update_setting("simple_mode", self.simple_mode_var.get())
         self._apply_simple_mode()
@@ -726,6 +850,10 @@ class TalksReducerGUI:
             "open_after_convert", bool(self.open_after_convert_var.get())
         )
 
+    def _on_server_url_change(self, *_: object) -> None:
+        value = self.server_url_var.get().strip()
+        self._update_setting("server_url", value)
+
     def _apply_theme(self) -> None:
         preference = self.theme_var.get().lower()
         if preference not in {"light", "dark"}:
@@ -1017,14 +1145,12 @@ class TalksReducerGUI:
         self._append_log("Starting processing…")
         self._stop_requested = False
         open_after_convert = bool(self.open_after_convert_var.get())
+        server_url = self.server_url_var.get().strip()
 
         def worker() -> None:
            def set_process(proc: subprocess.Popen) -> None:
                self._ffmpeg_process = proc
 
-           reporter = _TkProgressReporter(
-               self._append_log, process_callback=set_process
-           )
            try:
                files = gather_input_files(self.input_files)
                if not files:
@@ -1036,6 +1162,20 @@ class TalksReducerGUI:
                    self._set_status("Idle")
                    return
 
+               if server_url:
+                   success = self._process_files_via_server(
+                       files,
+                       args,
+                       server_url,
+                       open_after_convert=open_after_convert,
+                   )
+                   if success:
+                       self._notify(self._hide_stop_button)
+                   return
+
+               reporter = _TkProgressReporter(
+                   self._append_log, process_callback=set_process
+               )
                for index, file in enumerate(files, start=1):
                    self._append_log(
                        f"Processing {index}/{len(files)}: {os.path.basename(file)}"
@@ -1085,7 +1225,10 @@ class TalksReducerGUI:
         self._processing_thread.start()
 
         # Show Stop button when processing starts
-        self.stop_button.grid()
+        if server_url:
+            self.stop_button.grid_remove()
+        else:
+            self.stop_button.grid()
 
     def _stop_processing(self) -> None:
         """Stop the currently running processing by terminating FFmpeg."""
@@ -1153,6 +1296,80 @@ class TalksReducerGUI:
            args["small"] = True
        return args
 
+    def _process_files_via_server(
+        self,
+        files: List[str],
+        args: dict[str, object],
+        server_url: str,
+        *,
+        open_after_convert: bool,
+    ) -> bool:
+        """Send *files* to the configured server for processing."""
+
+        try:
+            service_module = importlib.import_module("talks_reducer.service_client")
+        except ModuleNotFoundError as exc:
+            self._append_log(f"Server client unavailable: {exc}")
+            self._notify(
+                lambda: self.messagebox.showerror(
+                    "Server unavailable",
+                    "Remote processing requires the gradio_client package.",
+                )
+            )
+            self._notify(lambda: self._set_status("Error"))
+            return False
+
+        output_override = args.get("output_file") if len(files) == 1 else None
+        ignored = [key for key in args if key not in {"output_file", "small"}]
+        if ignored:
+            ignored_options = ", ".join(sorted(ignored))
+            self._append_log(
+                f"Server mode ignores the following options: {ignored_options}"
+            )
+
+        for index, file in enumerate(files, start=1):
+            basename = os.path.basename(file)
+            self._append_log(
+                f"Uploading {index}/{len(files)}: {basename} to {server_url}"
+            )
+            try:
+                destination, summary, log_text = service_module.send_video(
+                    input_path=Path(file),
+                    output_path=output_override,
+                    server_url=server_url,
+                    small=bool(args.get("small", False)),
+                )
+            except Exception as exc:  # pragma: no cover - network safeguard
+                error_msg = f"Processing failed: {exc}"
+                self._append_log(error_msg)
+                self._notify(lambda: self._set_status("Error"))
+                self._notify(
+                    lambda: self.messagebox.showerror(
+                        "Server error",
+                        f"Failed to process {basename}: {exc}",
+                    )
+                )
+                return False
+
+            self._last_output = Path(destination)
+            self._last_time_ratio = None
+            self._last_size_ratio = None
+            for line in summary.splitlines():
+                self._append_log(line)
+            if log_text.strip():
+                self._append_log("Server log:")
+                for line in log_text.splitlines():
+                    self._append_log(line)
+            if open_after_convert:
+                self._notify(
+                    lambda path=self._last_output: self._open_in_file_manager(path)
+                )
+
+        self._append_log("All jobs finished successfully.")
+        self._notify(lambda: self.open_button.configure(state=self.tk.NORMAL))
+        self._notify(self._clear_input_files)
+        return True
+
     def _parse_float(self, value: str, label: str) -> float:
         try:
             return float(value)
@@ -1526,9 +1743,20 @@ def main(argv: Optional[Sequence[str]] = None) -> bool:
         action="store_true",
         help="Do not start the Talks Reducer server tray alongside the GUI.",
     )
+    parser.add_argument(
+        "--server",
+        action="store_true",
+        help="Launch the Talks Reducer server tray instead of the desktop GUI.",
+    )
 
     parsed_args, remaining = parser.parse_known_args(argv)
     no_tray = parsed_args.no_tray
+    if parsed_args.server:
+        package_name = __package__ or "talks_reducer"
+        tray_module = importlib.import_module(f"{package_name}.server_tray")
+        tray_main = getattr(tray_module, "main")
+        tray_main(remaining)
+        return False
     argv = remaining
 
     if argv:

{talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/server.py

@@ -309,7 +309,7 @@ def main(argv: Optional[Sequence[str]] = None) -> None:
 
     parser = argparse.ArgumentParser(description="Launch the Talks Reducer web UI.")
     parser.add_argument(
-        "--host", dest="host", default=
+        "--host", dest="host", default="0.0.0.0", help="Custom host to bind."
    )
    parser.add_argument(
        "--port",

{talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer/server_tray.py

@@ -35,7 +35,7 @@ def _guess_local_url(host: Optional[str], port: int) -> str:
     """Return the URL the server is most likely reachable at locally."""
 
     if host in (None, "", "0.0.0.0", "::"):
-        hostname = "
+        hostname = "0.0.0.0"
     else:
         hostname = host
     return f"http://{hostname}:{port}/"

{talks_reducer-0.5.2 → talks_reducer-0.5.5/talks_reducer.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: talks-reducer
-Version: 0.5.2
+Version: 0.5.5
 Summary: CLI for speeding up long-form talks by removing silence
 Author: Talks Reducer Maintainers
 License-Expression: MIT
@@ -70,6 +70,14 @@ Example CLI usage:
 talks-reducer --small input.mp4
 ```
 
+Need to offload work to a remote Talks Reducer server? Pass `--url` with the
+server address and the CLI will upload the input, wait for processing to finish,
+and download the rendered video:
+
+```sh
+talks-reducer --url http://localhost:9005 demo.mp4
+```
+
 ### Speech detection
 
 Talks Reducer now relies on its built-in volume thresholding to detect speech. Adjust `--silent_threshold` if you need to fine-tune when segments count as silence. Dropping the optional Silero VAD integration keeps the install lightweight and avoids pulling in PyTorch.
@@ -91,6 +99,10 @@ Want the server to live in your system tray instead of a terminal window? Use:
 talks-reducer server-tray
 ```
 
+Bundled Windows builds include the same behaviour: run
+`talks-reducer.exe --server` to launch the tray-managed server directly from the
+desktop shortcut without opening the GUI first.
+
 Pass `--debug` to print verbose logs about the tray icon lifecycle, and
 `--tray-mode pystray-detached` to try pystray's alternate detached runner. If
 the icon backend refuses to appear, fall back to `--tray-mode headless` to keep
@@ -111,6 +123,12 @@ This opens a local web page featuring a drag-and-drop upload zone, a **Small vid
 progress indicator, and automatic previews of the processed output. Once the job completes you can inspect the resulting compression
 ratio and download the rendered video directly from the page.
 
+The desktop GUI mirrors this behaviour. Open **Advanced** settings to provide a
+server URL and click **Discover** to scan your local network for Talks Reducer
+instances listening on port `9005`. Leaving the field blank keeps processing
+local; selecting a discovered server delegates rendering to that machine while
+the GUI downloads the finished files automatically.
+
 ### Uploading and retrieving a processed video
 
 1. Open the printed `http://localhost:<port>` address (the default port is `9005`).

{talks_reducer-0.5.2 → talks_reducer-0.5.5}/talks_reducer.egg-info/SOURCES.txt

@@ -7,6 +7,7 @@ talks_reducer/__main__.py
 talks_reducer/audio.py
 talks_reducer/chunks.py
 talks_reducer/cli.py
+talks_reducer/discovery.py
 talks_reducer/ffmpeg.py
 talks_reducer/gui.py
 talks_reducer/models.py
@@ -23,6 +24,7 @@ talks_reducer.egg-info/requires.txt
 talks_reducer.egg-info/top_level.txt
 tests/test_audio.py
 tests/test_cli.py
+tests/test_discovery.py
 tests/test_pipeline_service.py
 tests/test_server.py
 tests/test_service_client.py

{talks_reducer-0.5.2 → talks_reducer-0.5.5}/tests/test_cli.py

@@ -44,6 +44,7 @@ def test_main_runs_cli_with_arguments(monkeypatch: pytest.MonkeyPatch) -> None:
         frame_spreadage=None,
         sample_rate=None,
         small=False,
+        server_url=None,
     )
 
     parser_mock = mock.Mock()
@@ -103,3 +104,47 @@ def test_main_exits_when_server_unavailable(monkeypatch: pytest.MonkeyPatch) -> None:
 
     with pytest.raises(SystemExit):
         cli.main(["server"])
+
+
+def test_main_uses_remote_server_when_url_provided(
+    monkeypatch: pytest.MonkeyPatch,
+) -> None:
+    """The CLI should delegate to the remote server client when --url is set."""
+
+    parsed_args = SimpleNamespace(
+        input_file=["input.mp4"],
+        output_file=None,
+        temp_folder=None,
+        silent_threshold=None,
+        silent_speed=None,
+        sounded_speed=None,
+        frame_spreadage=None,
+        sample_rate=None,
+        small=True,
+        server_url="http://localhost:9005/",
+    )
+
+    parser_mock = mock.Mock()
+    parser_mock.parse_args.return_value = parsed_args
+
+    def fake_gather_input_files(_paths: list[str]) -> list[str]:
+        return ["/tmp/input.mp4"]
+
+    calls: list[SimpleNamespace] = []
+
+    def fake_send_video(**kwargs):
+        calls.append(SimpleNamespace(**kwargs))
+        return Path("/tmp/result.mp4"), "Summary", "Log"
+
+    monkeypatch.setattr(cli, "_build_parser", lambda: parser_mock)
+    monkeypatch.setattr(cli, "gather_input_files", fake_gather_input_files)
+    monkeypatch.setattr(cli, "_launch_gui", lambda argv: False)
+    import talks_reducer.service_client as service_client_module
+
+    monkeypatch.setattr(service_client_module, "send_video", fake_send_video)
+
+    cli.main(["input.mp4", "--url", "http://localhost:9005/"])
+
+    assert len(calls) == 1
+    assert calls[0].input_path == Path("/tmp/input.mp4")
+    assert calls[0].small is True

talks_reducer-0.5.5/tests/test_discovery.py

@@ -0,0 +1,52 @@
+"""Tests for the network discovery helper utilities."""
+
+from __future__ import annotations
+
+import http.server
+import threading
+from contextlib import contextmanager
+
+from talks_reducer import discovery
+
+
+@contextmanager
+def _http_server() -> tuple[str, int]:
+    """Start a lightweight HTTP server for discovery tests."""
+
+    class Handler(http.server.BaseHTTPRequestHandler):
+        def do_GET(self) -> None:  # pragma: no cover - exercised via discovery
+            self.send_response(200)
+            self.end_headers()
+            self.wfile.write(b"OK")
+
+        def log_message(self, *args, **kwargs):  # type: ignore[override]
+            return
+
+    server = http.server.ThreadingHTTPServer(("127.0.0.1", 0), Handler)
+    host, port = server.server_address
+
+    thread = threading.Thread(target=server.serve_forever, daemon=True)
+    thread.start()
+
+    try:
+        yield host, port
+    finally:
+        server.shutdown()
+        thread.join(timeout=2)
+
+
+def test_discover_servers_detects_running_instance() -> None:
+    """The discovery helper should report a reachable local server."""
+
+    with _http_server() as (host, port):
+        results = discovery.discover_servers(port=port, hosts=[host])
+
+    expected_url = f"http://{host}:{port}/"
+    assert expected_url in results
+
+
+def test_discover_servers_handles_missing_hosts() -> None:
+    """Scanning an unreachable host should return an empty result list."""
+
+    results = discovery.discover_servers(port=65500, hosts=["192.0.2.123"])
+    assert results == []

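Assuming a development checkout of the package with pytest installed, the new tests can be run on their own to exercise the discovery helper and the CLI's `--url` path:

```sh
python -m pytest tests/test_discovery.py tests/test_cli.py -q
```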