talks-reducer 0.5.5__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,12 +3,16 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  import argparse
6
+ import asyncio
6
7
  import shutil
8
+ import time
9
+ from contextlib import suppress
7
10
  from pathlib import Path
8
- from typing import Optional, Sequence, Tuple
11
+ from typing import Callable, Optional, Sequence, Tuple
9
12
 
10
13
  from gradio_client import Client
11
14
  from gradio_client import file as gradio_file
15
+ from gradio_client.client import Status, StatusUpdate
12
16
 
13
17
 
14
18
  def send_video(
@@ -16,6 +20,12 @@ def send_video(
16
20
  output_path: Optional[Path],
17
21
  server_url: str,
18
22
  small: bool = False,
23
+ *,
24
+ log_callback: Optional[Callable[[str], None]] = None,
25
+ stream_updates: bool = False,
26
+ progress_callback: Optional[
27
+ Callable[[str, Optional[int], Optional[int], str], None]
28
+ ] = None,
19
29
  ) -> Tuple[Path, str, str]:
20
30
  """Upload *input_path* to the Gradio server and download the processed video."""
21
31
 
@@ -23,17 +33,56 @@ def send_video(
23
33
  raise FileNotFoundError(f"Input file does not exist: {input_path}")
24
34
 
25
35
  client = Client(server_url)
26
- prediction = client.predict(
36
+ job = client.submit(
27
37
  gradio_file(str(input_path)),
28
38
  bool(small),
29
39
  api_name="/process_video",
30
40
  )
31
41
 
42
+ printed_lines = 0
43
+
44
+ def _emit_new_lines(log_text: str) -> None:
45
+ nonlocal printed_lines
46
+ if log_callback is None or not log_text:
47
+ return
48
+ lines = log_text.splitlines()
49
+ if printed_lines < len(lines):
50
+ for line in lines[printed_lines:]:
51
+ log_callback(line)
52
+ printed_lines = len(lines)
53
+
54
+ consumed_stream = False
55
+
56
+ if stream_updates:
57
+ consumed_stream = _stream_job_updates(
58
+ job,
59
+ _emit_new_lines,
60
+ progress_callback=progress_callback,
61
+ )
62
+
63
+ if not consumed_stream:
64
+ for output in job:
65
+ if not isinstance(output, (list, tuple)) or len(output) != 4:
66
+ continue
67
+ log_text_candidate = output[1] or ""
68
+ if isinstance(log_text_candidate, str):
69
+ _emit_new_lines(log_text_candidate)
70
+
71
+ prediction = job.result()
72
+
32
73
  try:
33
- _, log_text, summary, download_path = prediction
74
+ video_path, log_text, summary, download_path = prediction
34
75
  except (TypeError, ValueError) as exc: # pragma: no cover - defensive
35
76
  raise RuntimeError("Unexpected response from server") from exc
36
77
 
78
+ if isinstance(log_text, str):
79
+ _emit_new_lines(log_text)
80
+ else:
81
+ log_text = ""
82
+
83
+ if not download_path:
84
+ download_path = video_path
85
+
37
86
  if not download_path:
38
87
  raise RuntimeError("Server did not return a processed file")
39
88
 
@@ -49,9 +98,174 @@ def send_video(
49
98
  if download_source.resolve() != destination.resolve():
50
99
  shutil.copy2(download_source, destination)
51
100
 
101
+ if not isinstance(summary, str):
102
+ summary = ""
103
+ if not isinstance(log_text, str):
104
+ log_text = ""
105
+
52
106
  return destination, summary, log_text
53
107
 
54
108
 
109
+ def _coerce_int(value: object) -> Optional[int]:
110
+ """Return *value* as an ``int`` when possible."""
111
+
112
+ try:
113
+ if value is None:
114
+ return None
115
+ return int(value)
116
+ except (TypeError, ValueError):
117
+ return None
118
+
119
+
120
+ def _emit_progress_update(
121
+ callback: Callable[[str, Optional[int], Optional[int], str], None],
122
+ unit: object,
123
+ ) -> None:
124
+ """Normalize a progress unit and forward it to *callback*."""
125
+
126
+ if unit is None:
127
+ return
128
+
129
+ if hasattr(unit, "__dict__"):
130
+ data = unit
131
+ desc = getattr(data, "desc", None)
132
+ length = getattr(data, "length", None)
133
+ index = getattr(data, "index", None)
134
+ progress = getattr(data, "progress", None)
135
+ unit_name = getattr(data, "unit", None)
136
+ elif isinstance(unit, dict):
137
+ desc = unit.get("desc")
138
+ length = unit.get("length")
139
+ index = unit.get("index")
140
+ progress = unit.get("progress")
141
+ unit_name = unit.get("unit")
142
+ else:
143
+ return
144
+
145
+ total = _coerce_int(length)
146
+ current = _coerce_int(index)
147
+ if current is None and isinstance(progress, (int, float)) and total:
148
+ current = int(progress / total)
149
+
150
+ callback(desc or "Processing", progress, total, str(unit_name or ""))
151
+
152
+
153
+ async def _pump_job_updates(
154
+ job,
155
+ emit_log: Callable[[str], None],
156
+ progress_callback: Optional[
157
+ Callable[[str, Optional[int], Optional[int], str], None]
158
+ ],
159
+ ) -> None:
160
+ """Consume asynchronous updates from *job* and emit logs and progress."""
161
+
162
+ async for update in job: # type: ignore[async-for]
163
+ update_type = getattr(update, "type", "status")
164
+ if update_type == "output":
165
+ outputs = getattr(update, "outputs", None) or []
166
+ if isinstance(outputs, (list, tuple)) and len(outputs) == 4:
167
+ log_text_candidate = outputs[1] or ""
168
+ if isinstance(log_text_candidate, str):
169
+ emit_log(log_text_candidate)
170
+ if getattr(update, "final", False):
171
+ break
172
+ continue
173
+
174
+ status_update: StatusUpdate = update # type: ignore[assignment]
175
+ log_entry = getattr(status_update, "log", None)
176
+ if log_entry:
177
+ message = (
178
+ log_entry[0] if isinstance(log_entry, (list, tuple)) else log_entry
179
+ )
180
+ if isinstance(message, str):
181
+ emit_log(message)
182
+
183
+ if progress_callback and status_update.progress_data:
184
+ for unit in status_update.progress_data:
185
+ _emit_progress_update(progress_callback, unit)
186
+
187
+ if status_update.code in {Status.FINISHED, Status.CANCELLED}:
188
+ break
189
+
190
+
191
+ def _poll_job_updates(
192
+ job,
193
+ emit_log: Callable[[str], None],
194
+ progress_callback: Optional[
195
+ Callable[[str, Optional[int], Optional[int], str], None]
196
+ ],
197
+ *,
198
+ interval: float = 0.25,
199
+ ) -> None:
200
+ """Poll *job* for outputs and status updates when async streaming is unavailable."""
201
+
202
+ while True:
203
+ if job.done():
204
+ break
205
+
206
+ status: Optional[StatusUpdate] = None
207
+ with suppress(Exception):
208
+ status = job.status() # type: ignore[assignment]
209
+
210
+ if status is not None:
211
+ if progress_callback:
212
+ progress_data = getattr(status, "progress_data", None)
213
+ if progress_data:
214
+ for unit in progress_data:
215
+ _emit_progress_update(progress_callback, unit)
216
+ log_entry = getattr(status, "log", None)
217
+ if log_entry:
218
+ message = (
219
+ log_entry[0] if isinstance(log_entry, (list, tuple)) else log_entry
220
+ )
221
+ if isinstance(message, str):
222
+ emit_log(message)
223
+
224
+ outputs = []
225
+ with suppress(Exception):
226
+ outputs = job.outputs()
227
+ if outputs:
228
+ latest = outputs[-1]
229
+ if isinstance(latest, (list, tuple)) and len(latest) == 4:
230
+ log_text_candidate = latest[1] or ""
231
+ if isinstance(log_text_candidate, str):
232
+ emit_log(log_text_candidate)
233
+
234
+ time.sleep(interval)
235
+
236
+
237
+ def _stream_job_updates(
238
+ job,
239
+ emit_log: Callable[[str], None],
240
+ *,
241
+ progress_callback: Optional[
242
+ Callable[[str, Optional[int], Optional[int], str], None]
243
+ ] = None,
244
+ ) -> bool:
245
+ """Attempt to stream updates directly from *job*.
246
+
247
+ Returns ``True`` when streaming occurred, ``False`` when the legacy
248
+ generator-based fallback should be used.
249
+ """
250
+
251
+ communicator = getattr(job, "communicator", None)
252
+ if communicator is None:
253
+ return False
254
+
255
+ try:
256
+ asyncio.run(
257
+ _pump_job_updates(
258
+ job,
259
+ emit_log,
260
+ progress_callback,
261
+ )
262
+ )
263
+ except RuntimeError:
264
+ _poll_job_updates(job, emit_log, progress_callback)
265
+
266
+ return True
267
+
268
+
55
269
  def _build_parser() -> argparse.ArgumentParser:
56
270
  parser = argparse.ArgumentParser(
57
271
  description="Send a video to a running talks-reducer server and download the result.",
@@ -78,6 +292,11 @@ def _build_parser() -> argparse.ArgumentParser:
78
292
  action="store_true",
79
293
  help="Print the server log after processing completes.",
80
294
  )
295
+ parser.add_argument(
296
+ "--stream",
297
+ action="store_true",
298
+ help="Stream remote progress updates while waiting for the result.",
299
+ )
81
300
  return parser
82
301
 
83
302
 
@@ -85,16 +304,51 @@ def main(argv: Optional[Sequence[str]] = None) -> None:
85
304
  parser = _build_parser()
86
305
  args = parser.parse_args(argv)
87
306
 
307
+ printed_log_header = False
308
+
309
+ def _stream(line: str) -> None:
310
+ nonlocal printed_log_header
311
+ if not printed_log_header:
312
+ print("\nServer log:", flush=True)
313
+ printed_log_header = True
314
+ print(line, flush=True)
315
+
316
+ progress_state: dict[str, tuple[Optional[int], Optional[int], str]] = {}
317
+
318
+ def _progress(
319
+ desc: str, current: Optional[int], total: Optional[int], unit: str
320
+ ) -> None:
321
+ key = desc or "Processing"
322
+ state = (current, total, unit)
323
+ if progress_state.get(key) == state:
324
+ return
325
+ progress_state[key] = state
326
+
327
+ parts: list[str] = []
328
+ if current is not None and total and total > 0:
329
+ percent = (current / total) * 100
330
+ parts.append(f"{current}/{total}")
331
+ parts.append(f"{percent:.1f}%")
332
+ elif current is not None:
333
+ parts.append(str(current))
334
+ if unit:
335
+ parts.append(unit)
336
+ message = " ".join(parts).strip()
337
+ print(f"{key}: {message or 'update'}", flush=True)
338
+
88
339
  destination, summary, log_text = send_video(
89
340
  input_path=args.input.expanduser(),
90
341
  output_path=args.output.expanduser() if args.output else None,
91
342
  server_url=args.server,
92
343
  small=args.small,
344
+ log_callback=_stream if args.print_log else None,
345
+ stream_updates=args.stream,
346
+ progress_callback=_progress if args.stream else None,
93
347
  )
94
348
 
95
349
  print(summary)
96
350
  print(f"Saved processed video to {destination}")
97
- if args.print_log:
351
+ if args.print_log and log_text.strip() and not printed_log_header:
98
352
  print("\nServer log:\n" + log_text)
99
353
 
100
354
 
@@ -0,0 +1,22 @@
1
+ """Utilities for retrieving the Talks Reducer version across environments."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from importlib.metadata import PackageNotFoundError, version as metadata_version
6
+
7
+ try: # pragma: no cover - defensive fallback when metadata module missing
8
+ from .__about__ import __version__ as _about_version
9
+ except Exception: # pragma: no cover - runtime fallback in frozen apps
10
+ _about_version = ""
11
+
12
+
13
+ def resolve_version(package_name: str = "talks-reducer") -> str:
14
+ """Return the package version, preferring bundled metadata when available."""
15
+
16
+ if _about_version:
17
+ return _about_version
18
+
19
+ try:
20
+ return metadata_version(package_name)
21
+ except (PackageNotFoundError, Exception):
22
+ return "unknown"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: talks-reducer
3
- Version: 0.5.5
3
+ Version: 0.6.1
4
4
  Summary: CLI for speeding up long-form talks by removing silence
5
5
  Author: Talks Reducer Maintainers
6
6
  License-Expression: MIT
@@ -72,12 +72,17 @@ talks-reducer --small input.mp4
72
72
 
73
73
  Need to offload work to a remote Talks Reducer server? Pass `--url` with the
74
74
  server address and the CLI will upload the input, wait for processing to finish,
75
- and download the rendered video:
75
+ and download the rendered video. You can also provide `--host`, which is
76
+ expanded to a full URL using the default Talks Reducer port (`http://<host>:9005`):
76
77
 
77
78
  ```sh
78
79
  talks-reducer --url http://localhost:9005 demo.mp4
80
+ talks-reducer --host 192.168.1.42 demo.mp4
79
81
  ```
80
82
 
83
+ Want to see progress as the remote server works? Add `--server-stream` so the
84
+ CLI prints live progress bars and log lines while you wait for the download.
85
+
81
86
  ### Speech detection
82
87
 
83
88
  Talks Reducer now relies on its built-in volume thresholding to detect speech. Adjust `--silent_threshold` if you need to fine-tune when segments count as silence. Dropping the optional Silero VAD integration keeps the install lightweight and avoids pulling in PyTorch.
@@ -106,7 +111,8 @@ desktop shortcut without opening the GUI first.
106
111
  Pass `--debug` to print verbose logs about the tray icon lifecycle, and
107
112
  `--tray-mode pystray-detached` to try pystray's alternate detached runner. If
108
113
  the icon backend refuses to appear, fall back to `--tray-mode headless` to keep
109
- the web server running without a tray process. The tray menu includes an **Open GUI**
114
+ the web server running without a tray process. The tray menu highlights the
115
+ running Talks Reducer version and includes an **Open GUI**
110
116
  item (also triggered by double-clicking the icon) that launches the desktop
111
117
  Talks Reducer interface alongside an **Open WebUI** entry that opens the Gradio
112
118
  page in your browser. Close the GUI window to return to the tray without
@@ -120,13 +126,18 @@ browser automatically—pass `--open-browser` if you prefer the web page to
120
126
  launch as soon as the server is ready.
121
127
 
122
128
  This opens a local web page featuring a drag-and-drop upload zone, a **Small video** checkbox that mirrors the CLI preset, a live
123
- progress indicator, and automatic previews of the processed output. Once the job completes you can inspect the resulting compression
124
- ratio and download the rendered video directly from the page.
129
+ progress indicator, and automatic previews of the processed output. The page header and browser tab title include the current
130
+ Talks Reducer version so you can confirm which build the server is running. Once the job completes you can inspect the resulting
131
+ compression ratio and download the rendered video directly from the page.
125
132
 
126
133
  The desktop GUI mirrors this behaviour. Open **Advanced** settings to provide a
127
134
  server URL and click **Discover** to scan your local network for Talks Reducer
128
- instances listening on port `9005`. Leaving the field blank keeps processing
129
- local; selecting a discovered server delegates rendering to that machine while
135
+ instances listening on port `9005`. The button now reports discovery
136
+ progress as a `scanned / total` host count. A new
137
+ **Processing mode** toggle lets you decide whether work stays local or uploads
138
+ to the configured server—the **Remote** option becomes available as soon as a
139
+ URL is supplied. Leave the toggle on **Local** to keep rendering on this
140
+ machine even if a server is saved; switch to **Remote** to hand jobs off while
130
141
  the GUI downloads the finished files automatically.
131
142
 
132
143
  ### Uploading and retrieving a processed video
@@ -0,0 +1,22 @@
1
+ talks_reducer/__about__.py,sha256=KW-viuoc2AcVVTHYXfURFg9xsicuBA_X9n4tgL9p2ag,92
2
+ talks_reducer/__init__.py,sha256=Kzh1hXaw6Vq3DyTqrnJGOq8pn0P8lvaDcsg1bFUjFKk,208
3
+ talks_reducer/__main__.py,sha256=azR_vh8HFPLaOnh-L6gUFWsL67I6iHtbeH5rQhsipGY,299
4
+ talks_reducer/audio.py,sha256=sjHMeY0H9ESG-Gn5BX0wFRBX7sXjWwsgS8u9Vb0bJ88,4396
5
+ talks_reducer/chunks.py,sha256=IpdZxRFPURSG5wP-OQ_p09CVP8wcKwIFysV29zOTSWI,2959
6
+ talks_reducer/cli.py,sha256=JH8lvPUyk6jWGcnNRIGJeIh2ZcOvC5CORvj5GLuqq0c,16075
7
+ talks_reducer/discovery.py,sha256=BJ-iMir65cJMs0u-_EYdknBQT_grvCZaJNOx1xGi2PU,4590
8
+ talks_reducer/ffmpeg.py,sha256=dsHBOBcr5XCSg0q3xmzLOcibBiEdyrXdEQa-ze5vQsM,12551
9
+ talks_reducer/gui.py,sha256=mTB4m9LxqqTAaHoNf1QZZ4KfHiSMk0C7SJ5BDhqNR3Q,89777
10
+ talks_reducer/models.py,sha256=6Q_8rmHLyImXp88D4B7ptTbFaH_xXa_yxs8A2dypz2Y,2004
11
+ talks_reducer/pipeline.py,sha256=naAP8gli9MahoxVdla2tRn2vdDuBBU5ywWkYfZLkMcE,12211
12
+ talks_reducer/progress.py,sha256=Mh43M6VWhjjUv9CI22xfD2EJ_7Aq3PCueqefQ9Bd5-o,4565
13
+ talks_reducer/server.py,sha256=IzPe0qkZTs8kqoc9gtGDSWtwN2CTWHVcsYzQah-W3cU,13977
14
+ talks_reducer/server_tray.py,sha256=9jqYfjBpXj7_V_ZlVc7oSDKYRPpG7sOGoxuKvSGEQGM,23373
15
+ talks_reducer/service_client.py,sha256=JnhQhRxVwrGo9eUlduoZ6f_YYyXvjU7hF8UTNprH7TM,10984
16
+ talks_reducer/version_utils.py,sha256=TkYrTznVb2JqxFXzVzPd6PEnYP2MH7dxKl1J4-3DjMA,755
17
+ talks_reducer-0.6.1.dist-info/licenses/LICENSE,sha256=jN17mHNR3e84awmH3AbpWBcBDBzPxEH0rcOFoj1s7sQ,1124
18
+ talks_reducer-0.6.1.dist-info/METADATA,sha256=iTbxOTuZMnT2UG5KmwKDpKuy555CtsTSps7KSPAfJc0,8326
19
+ talks_reducer-0.6.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
20
+ talks_reducer-0.6.1.dist-info/entry_points.txt,sha256=X2pjoh2vWBXXExVWorv1mbA1aTEVP3fyuZH4AixqZK4,208
21
+ talks_reducer-0.6.1.dist-info/top_level.txt,sha256=pJWGcy__LR9JIEKH3QJyFmk9XrIsiFtqvuMNxFdIzDU,14
22
+ talks_reducer-0.6.1.dist-info/RECORD,,
@@ -1,21 +0,0 @@
1
- talks_reducer/__about__.py,sha256=yezjPkWreEY8UO-8bak-0saMNb0oEtJ9FAirgiDvr-Q,92
2
- talks_reducer/__init__.py,sha256=Kzh1hXaw6Vq3DyTqrnJGOq8pn0P8lvaDcsg1bFUjFKk,208
3
- talks_reducer/__main__.py,sha256=azR_vh8HFPLaOnh-L6gUFWsL67I6iHtbeH5rQhsipGY,299
4
- talks_reducer/audio.py,sha256=sjHMeY0H9ESG-Gn5BX0wFRBX7sXjWwsgS8u9Vb0bJ88,4396
5
- talks_reducer/chunks.py,sha256=IpdZxRFPURSG5wP-OQ_p09CVP8wcKwIFysV29zOTSWI,2959
6
- talks_reducer/cli.py,sha256=3QSvfX6y6t9XxRa0DSYvsx8SZ5SHSuTlqjp2GMaTSLo,10820
7
- talks_reducer/discovery.py,sha256=vuQ74YyqTp0y4qabWU6d1NPRst5-Ae7gJ7-KSkNgQNM,3659
8
- talks_reducer/ffmpeg.py,sha256=dsHBOBcr5XCSg0q3xmzLOcibBiEdyrXdEQa-ze5vQsM,12551
9
- talks_reducer/gui.py,sha256=BkbrzzUFOAT_7bJzkZCjvNn6TavDFQhnG3VnLgYq60M,69716
10
- talks_reducer/models.py,sha256=6Q_8rmHLyImXp88D4B7ptTbFaH_xXa_yxs8A2dypz2Y,2004
11
- talks_reducer/pipeline.py,sha256=JnWa84sMwYncGv7crhGLZu0cW1Xx0eGQyFP-nLp-DHk,12222
12
- talks_reducer/progress.py,sha256=Mh43M6VWhjjUv9CI22xfD2EJ_7Aq3PCueqefQ9Bd5-o,4565
13
- talks_reducer/server.py,sha256=R3RORNaZZMGHUNYV6J6IQkFjoR_UJb4y1hmh1tONl60,11240
14
- talks_reducer/server_tray.py,sha256=lC0D-3yWn_RJjwVtemJ-QzObjWLjH01HMrNibPlw84I,13375
15
- talks_reducer/service_client.py,sha256=Hv3hKBUn2n7M5hLUVHamNphUdMt9bCXUrJaJMZHlE0Q,3088
16
- talks_reducer-0.5.5.dist-info/licenses/LICENSE,sha256=jN17mHNR3e84awmH3AbpWBcBDBzPxEH0rcOFoj1s7sQ,1124
17
- talks_reducer-0.5.5.dist-info/METADATA,sha256=1FS759rqbikGdBrY9pqcllenA9diuVgN1xeQXNC-ics,7527
18
- talks_reducer-0.5.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
19
- talks_reducer-0.5.5.dist-info/entry_points.txt,sha256=X2pjoh2vWBXXExVWorv1mbA1aTEVP3fyuZH4AixqZK4,208
20
- talks_reducer-0.5.5.dist-info/top_level.txt,sha256=pJWGcy__LR9JIEKH3QJyFmk9XrIsiFtqvuMNxFdIzDU,14
21
- talks_reducer-0.5.5.dist-info/RECORD,,