talks-reducer 0.5.3-py3-none-any.whl → 0.6.0-py3-none-any.whl

@@ -2,4 +2,4 @@
 
 __all__ = ["__version__"]
 
-__version__ = "0.5.3"
+__version__ = "0.6.0"
talks_reducer/cli.py CHANGED
@@ -4,12 +4,14 @@ from __future__ import annotations
 
 import argparse
 import os
+import shutil
+import subprocess
 import sys
 import time
 from importlib import import_module
 from importlib.metadata import PackageNotFoundError, version
 from pathlib import Path
-from typing import Dict, List, Optional, Sequence
+from typing import Dict, List, Optional, Sequence, Tuple
 
 from . import audio
 
@@ -98,6 +100,23 @@ def _build_parser() -> argparse.ArgumentParser:
         action="store_true",
         help="Apply small file optimizations: resize video to 720p, audio to 128k bitrate, best compression (uses CUDA if available).",
     )
+    parser.add_argument(
+        "--url",
+        dest="server_url",
+        default=None,
+        help="Process videos via a Talks Reducer server at the provided base URL (for example, http://localhost:9005).",
+    )
+    parser.add_argument(
+        "--host",
+        dest="host",
+        default=None,
+        help="Shortcut for --url when targeting a Talks Reducer server on port 9005 (for example, localhost).",
+    )
+    parser.add_argument(
+        "--server-stream",
+        action="store_true",
+        help="Stream remote progress updates when using --url.",
+    )
     return parser
 
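Not part of the diff: a hedged usage sketch for the new flags, calling the CLI entry point directly. The file name "input.mp4" and the host address are illustrative; --host expands to http://<host>:9005 inside main(), and --server-stream only takes effect together with --url.

from talks_reducer import cli

# Send a file to a remote Talks Reducer server and stream its progress output.
cli.main(["--url", "http://localhost:9005", "--server-stream", "input.mp4"])

# Equivalent shortcut: --host fills in the default port 9005.
cli.main(["--host", "192.168.1.50", "input.mp4"])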
 
@@ -128,6 +147,122 @@ def gather_input_files(paths: List[str]) -> List[str]:
     return files
 
 
+def _print_total_time(start_time: float) -> None:
+    """Print the elapsed processing time since *start_time*."""
+
+    end_time = time.time()
+    total_time = end_time - start_time
+    hours, remainder = divmod(total_time, 3600)
+    minutes, seconds = divmod(remainder, 60)
+    print(f"\nTime: {int(hours)}h {int(minutes)}m {seconds:.2f}s")
+
+
+def _process_via_server(
+    files: Sequence[str], parsed_args: argparse.Namespace, *, start_time: float
+) -> Tuple[bool, Optional[str]]:
+    """Upload *files* to the configured server and download the results.
+
+    Returns a tuple of (success, error_message). When *success* is ``False``,
+    ``error_message`` contains the reason and the caller should fall back to the
+    local processing pipeline.
+    """
+
+    try:
+        from . import service_client
+    except ImportError as exc:  # pragma: no cover - optional dependency guard
+        return False, ("Server mode requires the gradio_client dependency. " f"({exc})")
+
+    server_url = parsed_args.server_url
+    if not server_url:
+        return False, "Server URL was not provided."
+
+    output_override: Optional[Path] = None
+    if parsed_args.output_file and len(files) == 1:
+        output_override = Path(parsed_args.output_file).expanduser()
+    elif parsed_args.output_file and len(files) > 1:
+        print(
+            "Warning: --output is ignored when processing multiple files via the server.",
+            file=sys.stderr,
+        )
+
+    unsupported_options = []
+    for name in (
+        "silent_threshold",
+        "silent_speed",
+        "sounded_speed",
+        "frame_spreadage",
+        "sample_rate",
+        "temp_folder",
+    ):
+        if getattr(parsed_args, name) is not None:
+            unsupported_options.append(f"--{name.replace('_', '-')}")
+
+    if unsupported_options:
+        print(
+            "Warning: the following options are ignored when using --url: "
+            + ", ".join(sorted(unsupported_options)),
+            file=sys.stderr,
+        )
+
+    for index, file in enumerate(files, start=1):
+        basename = os.path.basename(file)
+        print(
+            f"Processing file {index}/{len(files)} '{basename}' via server {server_url}"
+        )
+        printed_log_header = False
+        progress_state: dict[str, tuple[Optional[int], Optional[int], str]] = {}
+        stream_updates = bool(getattr(parsed_args, "server_stream", False))
+
+        def _stream_server_log(line: str) -> None:
+            nonlocal printed_log_header
+            if not printed_log_header:
+                print("\nServer log:", flush=True)
+                printed_log_header = True
+            print(line, flush=True)
+
+        def _stream_progress(
+            desc: str, current: Optional[int], total: Optional[int], unit: str
+        ) -> None:
+            key = desc or "Processing"
+            state = (current, total, unit)
+            if progress_state.get(key) == state:
+                return
+            progress_state[key] = state
+
+            parts: list[str] = []
+            if current is not None and total and total > 0:
+                percent = (current / total) * 100
+                parts.append(f"{current}/{total}")
+                parts.append(f"{percent:.1f}%")
+            elif current is not None:
+                parts.append(str(current))
+            if unit:
+                parts.append(unit)
+            message = " ".join(parts).strip()
+            print(f"{key}: {message or 'update'}", flush=True)
+
+        try:
+            destination, summary, log_text = service_client.send_video(
+                input_path=Path(file),
+                output_path=output_override,
+                server_url=server_url,
+                small=bool(parsed_args.small),
+                log_callback=_stream_server_log,
+                stream_updates=stream_updates,
+                progress_callback=_stream_progress if stream_updates else None,
+            )
+        except Exception as exc:  # pragma: no cover - network failure safeguard
+            return False, f"Failed to process {basename} via server: {exc}"
+
+        print(summary)
+        print(f"Saved processed video to {destination}")
+        if log_text.strip() and not printed_log_header:
+            print("\nServer log:\n" + log_text)
+
+    _print_total_time(start_time)
+    return True, None
+
+
 def _launch_gui(argv: Sequence[str]) -> bool:
     """Attempt to launch the GUI with the provided arguments."""
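Not part of the diff: a worked example of the progress line format built by the _stream_progress helper above, using illustrative values.

current, total, unit = 45, 90, "frames"
parts = [f"{current}/{total}", f"{(current / total) * 100:.1f}%", unit]
print("Encode: " + " ".join(parts))  # -> Encode: 45/90 50.0% frames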
 
@@ -144,7 +279,7 @@ def _launch_gui(argv: Sequence[str]) -> bool:
 
 
 def _launch_server(argv: Sequence[str]) -> bool:
-    """Attempt to launch the Gradio web server with the provided arguments."""
+    """Attempt to launch the Gradio server with the provided arguments."""
 
     try:
         server_module = import_module(".server", __package__)
@@ -159,6 +294,103 @@
     return True
 
 
+def _find_server_tray_binary() -> Optional[Path]:
+    """Return the best available path to the server tray executable."""
+
+    binary_name = "talks-reducer-server-tray"
+    candidates: List[Path] = []
+
+    which_path = shutil.which(binary_name)
+    if which_path:
+        candidates.append(Path(which_path))
+
+    try:
+        launcher_dir = Path(sys.argv[0]).resolve().parent
+    except Exception:
+        launcher_dir = None
+
+    potential_names = [binary_name]
+    if sys.platform == "win32":
+        potential_names = [f"{binary_name}.exe", binary_name]
+
+    if launcher_dir is not None:
+        for name in potential_names:
+            candidates.append(launcher_dir / name)
+
+    for candidate in candidates:
+        if candidate and candidate.exists() and os.access(candidate, os.X_OK):
+            return candidate
+
+    return None
+
+
+def _should_hide_subprocess_console() -> bool:
+    """Return ``True`` when a detached Windows launch should hide the console."""
+
+    if sys.platform != "win32":
+        return False
+
+    try:
+        import ctypes
+    except Exception:  # pragma: no cover - optional runtime dependency
+        return False
+
+    try:
+        get_console_window = ctypes.windll.kernel32.GetConsoleWindow  # type: ignore[attr-defined]
+    except Exception:  # pragma: no cover - platform specific guard
+        return False
+
+    try:
+        handle = get_console_window()
+    except Exception:  # pragma: no cover - defensive fallback
+        return False
+
+    return handle == 0
+
+
+def _launch_server_tray_binary(argv: Sequence[str]) -> bool:
+    """Launch the packaged server tray executable when available."""
+
+    command = _find_server_tray_binary()
+    if command is None:
+        return False
+
+    tray_args = [str(command), *list(argv)]
+
+    run_kwargs: Dict[str, object] = {"check": False}
+
+    if sys.platform == "win32":
+        no_window_flag = getattr(subprocess, "CREATE_NO_WINDOW", 0)
+        if no_window_flag and _should_hide_subprocess_console():
+            run_kwargs["creationflags"] = no_window_flag
+
+    try:
+        result = subprocess.run(tray_args, **run_kwargs)
+    except OSError:
+        return False
+
+    return result.returncode == 0
+
+
+def _launch_server_tray(argv: Sequence[str]) -> bool:
+    """Attempt to launch the server tray helper with the provided arguments."""
+
+    if _launch_server_tray_binary(argv):
+        return True
+
+    try:
+        tray_module = import_module(".server_tray", __package__)
+    except ImportError:
+        return False
+
+    tray_main = getattr(tray_module, "main", None)
+    if tray_main is None:
+        return False
+
+    tray_main(list(argv))
+    return True
+
+
 def main(argv: Optional[Sequence[str]] = None) -> None:
     """Entry point for the command line interface.
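Not part of the diff: an illustrative check of the lookup order implemented above. The packaged talks-reducer-server-tray executable is tried from PATH and the launcher directory before falling back to the in-process .server_tray module.

tray_path = _find_server_tray_binary()
print("tray binary:", tray_path if tray_path is not None else "not found; module fallback will be used")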
 
@@ -170,6 +402,14 @@ def main(argv: Optional[Sequence[str]] = None) -> None:
     else:
         argv_list = list(argv)
 
+    if "--server" in argv_list:
+        index = argv_list.index("--server")
+        tray_args = argv_list[index + 1 :]
+        if not _launch_server_tray(tray_args):
+            print("Server tray mode is unavailable.", file=sys.stderr)
+            sys.exit(1)
+        return
+
     if argv_list and argv_list[0] in {"server", "serve"}:
         if not _launch_server(argv_list[1:]):
             print("Gradio server mode is unavailable.", file=sys.stderr)
@@ -186,6 +426,11 @@ def main(argv: Optional[Sequence[str]] = None) -> None:
 
     parser = _build_parser()
     parsed_args = parser.parse_args(argv_list)
+
+    host_value = getattr(parsed_args, "host", None)
+    if host_value:
+        parsed_args.server_url = f"http://{host_value}:9005"
+
     start_time = time.time()
 
     files = gather_input_files(parsed_args.input_file)
@@ -195,11 +440,33 @@ def main(argv: Optional[Sequence[str]] = None) -> None:
     }
     del args["input_file"]
 
+    if "host" in args:
+        del args["host"]
+
     if len(files) > 1 and "output_file" in args:
         del args["output_file"]
 
+    fallback_messages: List[str] = []
+    if parsed_args.server_url:
+        server_success, error_message = _process_via_server(
+            files, parsed_args, start_time=start_time
+        )
+        if server_success:
+            return
+
+        fallback_reason = error_message or "Server processing is unavailable."
+        print(fallback_reason, file=sys.stderr)
+        fallback_messages.append(fallback_reason)
+
+        fallback_notice = "Falling back to local processing pipeline."
+        print(fallback_notice, file=sys.stderr)
+        fallback_messages.append(fallback_notice)
+
     reporter = TqdmProgressReporter()
 
+    for message in fallback_messages:
+        reporter.log(message)
+
     for index, file in enumerate(files):
         print(f"Processing file {index + 1}/{len(files)} '{os.path.basename(file)}'")
         local_options = dict(args)
@@ -241,11 +508,7 @@ def main(argv: Optional[Sequence[str]] = None) -> None:
     if summary_parts:
         reporter.log("Result: " + ", ".join(summary_parts))
 
-    end_time = time.time()
-    total_time = end_time - start_time
-    hours, remainder = divmod(total_time, 3600)
-    minutes, seconds = divmod(remainder, 60)
-    print(f"\nTime: {int(hours)}h {int(minutes)}m {seconds:.2f}s")
+    _print_total_time(start_time)
 
 
 if __name__ == "__main__":
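Not part of the diff: a worked example of the elapsed-time formatting now centralised in _print_total_time (3725.5 seconds is an illustrative value).

total_time = 3725.5
hours, remainder = divmod(total_time, 3600)   # 1.0, 125.5
minutes, seconds = divmod(remainder, 60)      # 2.0, 5.5
print(f"\nTime: {int(hours)}h {int(minutes)}m {seconds:.2f}s")  # Time: 1h 2m 5.50s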
@@ -0,0 +1,149 @@
+"""Utilities for discovering Talks Reducer servers on the local network."""
+
+from __future__ import annotations
+
+import ipaddress
+import socket
+from concurrent.futures import ThreadPoolExecutor
+from contextlib import closing
+from http.client import HTTPConnection
+from typing import Callable, Iterable, Iterator, List, Optional, Set
+
+DEFAULT_PORT = 9005
+DEFAULT_TIMEOUT = 0.4
+
+_EXCLUDED_HOSTS = {"127.0.0.1", "localhost", "0.0.0.0"}
+
+
+def _should_include_host(host: Optional[str]) -> bool:
+    """Return ``True`` when *host* should be scanned for discovery."""
+
+    if not host:
+        return False
+    return host not in _EXCLUDED_HOSTS
+
+
+def _iter_local_ipv4_addresses() -> Iterator[str]:
+    """Yield IPv4 addresses that belong to the local machine."""
+
+    seen: Set[str] = set()
+
+    try:
+        hostname = socket.gethostname()
+        for info in socket.getaddrinfo(hostname, None, family=socket.AF_INET):
+            address = info[4][0]
+            if address and address not in seen:
+                seen.add(address)
+                yield address
+    except socket.gaierror:
+        pass
+
+    for probe in ("8.8.8.8", "1.1.1.1"):
+        try:
+            with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
+                sock.connect((probe, 80))
+                address = sock.getsockname()[0]
+                if address and address not in seen:
+                    seen.add(address)
+                    yield address
+        except OSError:
+            continue
+
+
+def _build_default_host_candidates(prefix_length: int = 24) -> List[str]:
+    """Return a list of host candidates based on detected local networks."""
+
+    hosts: Set[str] = set()
+
+    for address in _iter_local_ipv4_addresses():
+        if _should_include_host(address):
+            hosts.add(address)
+        try:
+            network = ipaddress.ip_network(f"{address}/{prefix_length}", strict=False)
+        except ValueError:
+            continue
+        for host in network.hosts():
+            host_str = str(host)
+            if _should_include_host(host_str):
+                hosts.add(host_str)
+
+    return sorted(hosts)
+
+
+def _probe_host(host: str, port: int, timeout: float) -> Optional[str]:
+    """Return the URL if *host* responds on *port* within *timeout* seconds."""
+
+    connection: Optional[HTTPConnection] = None
+    try:
+        connection = HTTPConnection(host, port, timeout=timeout)
+        connection.request(
+            "GET", "/", headers={"User-Agent": "talks-reducer-discovery"}
+        )
+        response = connection.getresponse()
+        # Drain the response to avoid ResourceWarning in some Python versions.
+        response.read()
+        if 200 <= response.status < 500:
+            return f"http://{host}:{port}/"
+    except OSError:
+        return None
+    finally:
+        if connection is not None:
+            try:
+                connection.close()
+            except Exception:
+                pass
+    return None
+
+
+ProgressCallback = Callable[[int, int], None]
+
+
+def discover_servers(
+    *,
+    port: int = DEFAULT_PORT,
+    timeout: float = DEFAULT_TIMEOUT,
+    hosts: Optional[Iterable[str]] = None,
+    progress_callback: Optional[ProgressCallback] = None,
+) -> List[str]:
+    """Scan *hosts* for running Talks Reducer servers on *port*.
+
+    When *hosts* is omitted, the local /24 networks derived from available IPv4
+    addresses are scanned. ``127.0.0.1``, ``localhost``, and ``0.0.0.0`` are
+    excluded to avoid duplicating local endpoints. The optional
+    *progress_callback* receives the number of scanned hosts and the total
+    candidate count whenever discovery advances. The function returns a sorted
+    list of unique base URLs.
+    """
+
+    if hosts is None:
+        candidates = sorted(
+            {
+                host
+                for host in _build_default_host_candidates()
+                if _should_include_host(host)
+            }
+        )
+    else:
+        candidates = sorted({host for host in hosts if _should_include_host(host)})
+
+    results: List[str] = []
+    total = len(candidates)
+
+    if progress_callback is not None:
+        progress_callback(0, total)
+
+    with ThreadPoolExecutor(max_workers=32) as executor:
+        scanned = 0
+        for url in executor.map(
+            lambda host: _probe_host(host, port, timeout), candidates
+        ):
+            if url and url not in results:
+                results.append(url)
+            scanned += 1
+            if progress_callback is not None:
+                progress_callback(scanned, total)
+
+    return results
+
+
+__all__ = ["discover_servers"]
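Not part of the diff: a hedged usage sketch for the new discovery helper. The module path talks_reducer.discovery is an assumption; the diff does not show the new file's name.

from talks_reducer.discovery import discover_servers

def show_progress(scanned: int, total: int) -> None:
    print(f"scanned {scanned}/{total} hosts", end="\r")

# Scan the local /24 networks on port 9005 with the default 0.4s probe timeout.
urls = discover_servers(progress_callback=show_progress)
print("\nFound servers:", urls or "none")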