talks-reducer 0.7.2-py3-none-any.whl → 0.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
talks_reducer/server.py CHANGED
@@ -9,6 +9,7 @@ import socket
 import sys
 import tempfile
 from contextlib import AbstractContextManager, suppress
+from dataclasses import dataclass
 from pathlib import Path
 from queue import SimpleQueue
 from threading import Thread
@@ -244,6 +245,98 @@ def _format_summary(result: ProcessingResult) -> str:
     return "\n".join(lines)
 
 
+PipelineEvent = tuple[str, object]
+
+
+def _default_reporter_factory(
+    progress_callback: Optional[Callable[[int, int, str], None]],
+    log_callback: Callable[[str], None],
+) -> SignalProgressReporter:
+    """Construct a :class:`GradioProgressReporter` with the given callbacks."""
+
+    return GradioProgressReporter(
+        progress_callback=progress_callback,
+        log_callback=log_callback,
+    )
+
+
+def run_pipeline_job(
+    options: ProcessingOptions,
+    *,
+    speed_up: Callable[[ProcessingOptions, SignalProgressReporter], ProcessingResult],
+    reporter_factory: Callable[
+        [Optional[Callable[[int, int, str], None]], Callable[[str], None]],
+        SignalProgressReporter,
+    ],
+    events: SimpleQueue[PipelineEvent],
+    enable_progress: bool = True,
+    start_in_thread: bool = True,
+) -> Iterator[PipelineEvent]:
+    """Execute the processing pipeline and yield emitted events."""
+
+    def _emit(kind: str, payload: object) -> None:
+        events.put((kind, payload))
+
+    progress_callback: Optional[Callable[[int, int, str], None]] = None
+    if enable_progress:
+        progress_callback = lambda current, total, desc: _emit(
+            "progress", (current, total, desc)
+        )
+
+    reporter = reporter_factory(
+        progress_callback, lambda message: _emit("log", message)
+    )
+
+    def _worker() -> None:
+        try:
+            result = speed_up(options, reporter=reporter)
+        except FFmpegNotFoundError as exc:  # pragma: no cover - depends on runtime env
+            _emit("error", gr.Error(str(exc)))
+        except FileNotFoundError as exc:
+            _emit("error", gr.Error(str(exc)))
+        except Exception as exc:  # pragma: no cover - defensive fallback
+            reporter.log(f"Error: {exc}")
+            _emit("error", gr.Error(f"Failed to process the video: {exc}"))
+        else:
+            reporter.log("Processing complete.")
+            _emit("result", result)
+        finally:
+            _emit("done", None)
+
+    thread: Optional[Thread] = None
+    if start_in_thread:
+        thread = Thread(target=_worker, daemon=True)
+        thread.start()
+    else:
+        _worker()
+
+    try:
+        while True:
+            kind, payload = events.get()
+            if kind == "done":
+                break
+            yield (kind, payload)
+    finally:
+        if thread is not None:
+            thread.join()
+
+
+@dataclass
+class ProcessVideoDependencies:
+    """Container for dependencies used by :func:`process_video`."""
+
+    speed_up: Callable[
+        [ProcessingOptions, SignalProgressReporter], ProcessingResult
+    ] = speed_up_video
+    reporter_factory: Callable[
+        [Optional[Callable[[int, int, str], None]], Callable[[str], None]],
+        SignalProgressReporter,
+    ] = _default_reporter_factory
+    queue_factory: Callable[[], SimpleQueue[PipelineEvent]] = SimpleQueue
+    run_pipeline_job_func: Callable[..., Iterator[PipelineEvent]] = run_pipeline_job
+    start_in_thread: bool = True
+
+
 def process_video(
     file_path: Optional[str],
     small_video: bool,
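The new `run_pipeline_job` helper extracts the worker-thread and event-queue plumbing that `process_video` previously inlined, so it can be exercised on its own. A minimal sketch of driving it synchronously, assuming `run_pipeline_job` is importable from `talks_reducer.server`; `FakeReporter` and `fake_speed_up` are illustrative test doubles, not part of the package:

```python
from queue import SimpleQueue

from talks_reducer.server import run_pipeline_job


class FakeReporter:
    """Minimal stand-in for GradioProgressReporter."""

    def __init__(self, progress_callback, log_callback):
        self._log = log_callback

    def log(self, message: str) -> None:
        self._log(message)


def fake_speed_up(options, *, reporter):
    # Pretend to process the video and hand back a sentinel result.
    reporter.log("processing...")
    return "fake-result"


events: SimpleQueue = SimpleQueue()
for kind, payload in run_pipeline_job(
    options=None,  # a real ProcessingOptions instance in production
    speed_up=fake_speed_up,
    reporter_factory=FakeReporter,  # invoked as factory(progress_cb, log_cb)
    events=events,
    enable_progress=False,
    start_in_thread=False,  # run inline so the test stays deterministic
):
    print(kind, payload)
# -> ("log", "processing..."), ("log", "Processing complete."), ("result", "fake-result")
```

With `start_in_thread=False` the worker finishes on the calling thread before the queue is drained, so tests need no joins or timeouts.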
@@ -251,6 +344,8 @@ def process_video(
     sounded_speed: Optional[float] = None,
     silent_speed: Optional[float] = None,
     progress: Optional[gr.Progress] = gr.Progress(track_tqdm=False),
+    *,
+    dependencies: Optional[ProcessVideoDependencies] = None,
 ) -> Iterator[tuple[Optional[str], str, str, Optional[str]]]:
     """Run the Talks Reducer pipeline for a single uploaded file."""
 
@@ -265,23 +360,8 @@ def process_video(
     temp_folder = workspace / "temp"
     output_file = _build_output_path(input_path, workspace, small_video)
 
-    progress_callback: Optional[Callable[[int, int, str], None]] = None
-    if progress is not None:
-
-        def _callback(current: int, total: int, desc: str) -> None:
-            events.put(("progress", (current, total, desc)))
-
-        progress_callback = _callback
-
-    events: "SimpleQueue[tuple[str, object]]" = SimpleQueue()
-
-    def _log_callback(message: str) -> None:
-        events.put(("log", message))
-
-    reporter = GradioProgressReporter(
-        progress_callback=progress_callback,
-        log_callback=_log_callback,
-    )
+    deps = dependencies or ProcessVideoDependencies()
+    events = deps.queue_factory()
 
     option_kwargs: dict[str, float] = {}
     if silent_threshold is not None:
@@ -299,31 +379,20 @@
         **option_kwargs,
     )
 
-    def _worker() -> None:
-        try:
-            result = speed_up_video(options, reporter=reporter)
-        except FFmpegNotFoundError as exc:  # pragma: no cover - depends on runtime env
-            events.put(("error", gr.Error(str(exc))))
-        except FileNotFoundError as exc:
-            events.put(("error", gr.Error(str(exc))))
-        except Exception as exc:  # pragma: no cover - defensive fallback
-            reporter.log(f"Error: {exc}")
-            events.put(("error", gr.Error(f"Failed to process the video: {exc}")))
-        else:
-            reporter.log("Processing complete.")
-            events.put(("result", result))
-        finally:
-            events.put(("done", None))
-
-    worker = Thread(target=_worker, daemon=True)
-    worker.start()
+    event_stream = deps.run_pipeline_job_func(
+        options,
+        speed_up=deps.speed_up,
+        reporter_factory=deps.reporter_factory,
+        events=events,
+        enable_progress=progress is not None,
+        start_in_thread=deps.start_in_thread,
+    )
 
     collected_logs: list[str] = []
     final_result: Optional[ProcessingResult] = None
     error: Optional[gr.Error] = None
 
-    while True:
-        kind, payload = events.get()
+    for kind, payload in event_stream:
         if kind == "log":
             text = str(payload).strip()
             if text:
@@ -343,10 +412,6 @@
             final_result = payload  # type: ignore[assignment]
         elif kind == "error":
             error = payload  # type: ignore[assignment]
-        elif kind == "done":
-            break
-
-    worker.join()
 
     if error is not None:
         raise error
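Combined with the `dependencies` keyword added above, this seam lets tests replace the heavy pipeline wholesale. A hedged sketch; `fake_speed_up` and `make_fake_result` are illustrative, and the fake result would need to mimic `ProcessingResult` closely enough for the later summary step:

```python
from talks_reducer.server import ProcessVideoDependencies


def fake_speed_up(options, *, reporter):
    reporter.log("skipping real ffmpeg work")
    return make_fake_result(options)  # hypothetical ProcessingResult look-alike


deps = ProcessVideoDependencies(
    speed_up=fake_speed_up,   # bypass the real pipeline
    start_in_thread=False,    # keep all work on the calling thread
)

# process_video(file_path="talk.mp4", small_video=False, dependencies=deps)
# would then exercise the event plumbing without touching ffmpeg.
```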
@@ -12,7 +12,7 @@ import time
 import webbrowser
 from contextlib import suppress
 from pathlib import Path
-from typing import Any, Iterator, Optional, Sequence
+from typing import Any, Callable, Iterator, Optional, Sequence
 from urllib.parse import urlsplit, urlunsplit
 
 from PIL import Image
@@ -49,9 +49,27 @@ def _guess_local_url(host: Optional[str], port: int) -> str:
     return f"http://{hostname}:{port}/"
 
 
-def _normalize_local_url(url: str, host: Optional[str], port: int) -> str:
+def _coerce_url(value: Optional[Any]) -> Optional[str]:
+    """Convert an arbitrary URL-like object to a trimmed string if possible."""
+
+    if not value:
+        return None
+
+    try:
+        text = str(value)
+    except Exception:  # pragma: no cover - defensive fallback
+        return None
+
+    stripped = text.strip()
+    return stripped or None
+
+
+def _normalize_local_url(url: Optional[str], host: Optional[str], port: int) -> str:
     """Rewrite *url* when a wildcard host should map to the loopback address."""
 
+    if not url:
+        return _guess_local_url(host, port)
+
     if host not in (None, "", "0.0.0.0"):
         return url
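Both helpers are small pure functions, so their contract is easy to pin down. Illustrative expectations, assuming the two functions are in scope:

```python
assert _coerce_url(None) is None              # falsy values are rejected
assert _coerce_url("   ") is None             # whitespace-only strings collapse to None
assert _coerce_url(" http://x:7860/ ") == "http://x:7860/"

# With no usable URL, _normalize_local_url now falls back to the guessed
# loopback address instead of passing None through.
assert _normalize_local_url(None, "0.0.0.0", 7860) == _guess_local_url("0.0.0.0", 7860)
```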
@@ -131,6 +149,15 @@ def _load_icon() -> Image.Image:
     return _generate_fallback_icon()
 
 
+class _HeadlessTrayBackend:
+    """Placeholder backend used when the tray icon is disabled."""
+
+    def __getattr__(self, name: str) -> Any:
+        raise RuntimeError(
+            "System tray backend is unavailable when running in headless mode."
+        )
+
+
 class _ServerTrayApplication:
     """Coordinate the Gradio server lifecycle and the system tray icon."""
 
@@ -142,21 +169,28 @@ class _ServerTrayApplication:
         share: bool,
         open_browser: bool,
         tray_mode: str,
+        tray_backend: Any,
+        build_interface: Callable[[], Any],
+        open_browser_callback: Callable[[str], Any],
     ) -> None:
         self._host = host
         self._port = port
         self._share = share
         self._open_browser_on_start = open_browser
         self._tray_mode = tray_mode
+        self._tray_backend = tray_backend
+        self._build_interface = build_interface
+        self._open_browser = open_browser_callback
 
         self._stop_event = threading.Event()
+        self._server_ready_event = threading.Event()
         self._ready_event = threading.Event()
         self._gui_lock = threading.Lock()
 
         self._server_handle: Optional[Any] = None
         self._local_url: Optional[str] = None
         self._share_url: Optional[str] = None
-        self._icon: Optional[pystray.Icon] = None
+        self._icon: Optional[Any] = None
         self._gui_process: Optional[subprocess.Popen[Any]] = None
         self._startup_error: Optional[BaseException] = None
@@ -171,7 +205,7 @@ class _ServerTrayApplication:
             self._port,
             self._share,
         )
-        demo = build_interface()
+        demo = self._build_interface()
         server = demo.launch(
             server_name=self._host,
             server_port=self._port,
@@ -183,15 +217,15 @@
 
         self._server_handle = server
         fallback_url = _guess_local_url(self._host, self._port)
-        local_url = getattr(server, "local_url", fallback_url)
+        local_url = _coerce_url(getattr(server, "local_url", fallback_url))
         self._local_url = _normalize_local_url(local_url, self._host, self._port)
-        self._share_url = getattr(server, "share_url", None)
-        self._ready_event.set()
+        self._share_url = _coerce_url(getattr(server, "share_url", None))
+        self._server_ready_event.set()
         LOGGER.info("Server ready at %s", self._local_url)
 
         # Keep checking for a share URL while the server is running.
         while not self._stop_event.is_set():
-            share_url = getattr(server, "share_url", None)
+            share_url = _coerce_url(getattr(server, "share_url", None))
             if share_url:
                 self._share_url = share_url
                 LOGGER.info("Share URL available: %s", share_url)
@@ -206,12 +240,12 @@
 
     def _handle_open_webui(
         self,
-        _icon: Optional[pystray.Icon] = None,
-        _item: Optional[pystray.MenuItem] = None,
+        _icon: Optional[Any] = None,
+        _item: Optional[Any] = None,
     ) -> None:
         url = self._resolve_url()
         if url:
-            webbrowser.open(url)
+            self._open_browser(url)
             LOGGER.info("Opened browser to %s", url)
         else:
             LOGGER.warning("Server URL not yet available; please try again.")
@@ -242,8 +276,8 @@
 
     def _launch_gui(
         self,
-        _icon: Optional[pystray.Icon] = None,
-        _item: Optional[pystray.MenuItem] = None,
+        _icon: Optional[Any] = None,
+        _item: Optional[Any] = None,
    ) -> None:
         """Launch the Talks Reducer GUI in a background subprocess."""
 
@@ -274,21 +308,27 @@
 
     def _handle_quit(
         self,
-        icon: Optional[pystray.Icon] = None,
-        _item: Optional[pystray.MenuItem] = None,
+        icon: Optional[Any] = None,
+        _item: Optional[Any] = None,
     ) -> None:
         self.stop()
         if icon is not None:
-            icon.stop()
+            stop_method = getattr(icon, "stop", None)
+            if callable(stop_method):
+                with suppress(Exception):
+                    stop_method()
 
     # Public API -------------------------------------------------------
 
-    def _await_server_start(self, icon: Optional[pystray.Icon]) -> None:
+    def _await_server_start(self, icon: Optional[Any]) -> None:
         """Wait for the server to signal readiness or trigger shutdown on failure."""
 
-        if self._ready_event.wait(timeout=30):
-            if self._open_browser_on_start and not self._stop_event.is_set():
-                self._handle_open_webui()
+        if self._server_ready_event.wait(timeout=30):
+            try:
+                if self._open_browser_on_start and not self._stop_event.is_set():
+                    self._handle_open_webui()
+            finally:
+                self._ready_event.set()
             return
 
         if self._stop_event.is_set():
@@ -301,8 +341,10 @@
         LOGGER.error("%s", error)
 
         if icon is not None:
-            with suppress(Exception):
-                icon.notify("Talks Reducer server failed to start.")
+            notify = getattr(icon, "notify", None)
+            if callable(notify):
+                with suppress(Exception):
+                    notify("Talks Reducer server failed to start.")
 
         self.stop()
 
@@ -334,17 +376,17 @@
             f" v{APP_VERSION}" if APP_VERSION and APP_VERSION != "unknown" else ""
         )
         version_label = f"Talks Reducer{version_suffix}"
-        menu = pystray.Menu(
-            pystray.MenuItem(version_label, None, enabled=False),
-            pystray.MenuItem(
+        menu = self._tray_backend.Menu(
+            self._tray_backend.MenuItem(version_label, None, enabled=False),
+            self._tray_backend.MenuItem(
                 "Open GUI",
                 self._launch_gui,
                 default=True,
             ),
-            pystray.MenuItem("Open WebUI", self._handle_open_webui),
-            pystray.MenuItem("Quit", self._handle_quit),
+            self._tray_backend.MenuItem("Open WebUI", self._handle_open_webui),
+            self._tray_backend.MenuItem("Quit", self._handle_quit),
         )
-        self._icon = pystray.Icon(
+        self._icon = self._tray_backend.Icon(
             "talks-reducer",
             icon_image,
             f"{version_label} Server",
@@ -380,12 +422,17 @@
         """Stop the tray icon and shut down the Gradio server."""
 
         self._stop_event.set()
+        self._server_ready_event.set()
+        self._ready_event.set()
 
         if self._icon is not None:
             with suppress(Exception):
-                self._icon.visible = False
-            with suppress(Exception):
-                self._icon.stop()
+                if hasattr(self._icon, "visible"):
+                    self._icon.visible = False
+            stop_method = getattr(self._icon, "stop", None)
+            if callable(stop_method):
+                with suppress(Exception):
+                    stop_method()
 
         self._stop_gui()
 
@@ -419,6 +466,42 @@
         self._gui_process = None
 
 
+def create_tray_app(
+    *,
+    host: Optional[str],
+    port: int,
+    share: bool,
+    open_browser: bool,
+    tray_mode: str,
+) -> _ServerTrayApplication:
+    """Build a :class:`_ServerTrayApplication` wired to production dependencies."""
+
+    if tray_mode != "headless" and (
+        pystray is None or PYSTRAY_IMPORT_ERROR is not None
+    ):
+        raise RuntimeError(
+            "System tray mode requires the 'pystray' dependency. Install it with "
+            "`pip install pystray` or `pip install talks-reducer[dev]` and try again."
+        ) from PYSTRAY_IMPORT_ERROR
+
+    tray_backend: Any
+    if pystray is None:
+        tray_backend = _HeadlessTrayBackend()
+    else:
+        tray_backend = pystray
+
+    return _ServerTrayApplication(
+        host=host,
+        port=port,
+        share=share,
+        open_browser=open_browser,
+        tray_mode=tray_mode,
+        tray_backend=tray_backend,
+        build_interface=build_interface,
+        open_browser_callback=webbrowser.open,
+    )
+
+
 def main(argv: Optional[Sequence[str]] = None) -> None:
     """Launch the Gradio server with a companion system tray icon."""
@@ -477,13 +560,7 @@ def main(argv: Optional[Sequence[str]] = None) -> None:
         datefmt="%H:%M:%S",
     )
 
-    if args.tray_mode != "headless" and PYSTRAY_IMPORT_ERROR is not None:
-        raise RuntimeError(
-            "System tray mode requires the 'pystray' dependency. Install it with "
-            "`pip install pystray` or `pip install talks-reducer[dev]` and try again."
-        ) from PYSTRAY_IMPORT_ERROR
-
-    app = _ServerTrayApplication(
+    app = create_tray_app(
         host=args.host,
         port=args.port,
         share=args.share,
@@ -499,7 +576,7 @@
         app.stop()
 
 
-__all__ = ["main"]
+__all__ = ["create_tray_app", "main"]
 
 
 if __name__ == "__main__":  # pragma: no cover - convenience entry point
@@ -8,7 +8,7 @@ import shutil
 import time
 from contextlib import suppress
 from pathlib import Path
-from typing import Callable, Optional, Sequence, Tuple
+from typing import Any, AsyncIterator, Callable, Optional, Sequence, Tuple
 
 from gradio_client import Client
 from gradio_client import file as gradio_file
@@ -20,6 +20,55 @@ except ImportError:  # pragma: no cover - allow running as script
     from talks_reducer.pipeline import ProcessingAborted
 
 
+class StreamingJob:
+    """Adapter that provides a consistent interface for streaming jobs."""
+
+    def __init__(self, job: Any) -> None:
+        self._job = job
+
+    @property
+    def raw(self) -> Any:
+        """Return the wrapped job instance."""
+
+        return self._job
+
+    @property
+    def supports_streaming(self) -> bool:
+        """Return ``True`` when the remote job can stream async updates."""
+
+        communicator = getattr(self._job, "communicator", None)
+        return communicator is not None
+
+    async def async_iter_updates(self) -> AsyncIterator[Any]:
+        """Yield updates from the wrapped job asynchronously."""
+
+        async for update in self._job:  # type: ignore[async-for]
+            yield update
+
+    def status(self) -> Any:
+        """Return the latest status update from the job when available."""
+
+        status_method = getattr(self._job, "status", None)
+        if callable(status_method):
+            return status_method()
+        raise AttributeError("Wrapped job does not expose a status() method")
+
+    def outputs(self) -> Any:
+        """Return cached outputs from the job when available."""
+
+        outputs_method = getattr(self._job, "outputs", None)
+        if callable(outputs_method):
+            return outputs_method()
+        raise AttributeError("Wrapped job does not expose an outputs() method")
+
+    def cancel(self) -> None:
+        """Cancel the remote job when supported."""
+
+        cancel_method = getattr(self._job, "cancel", None)
+        if callable(cancel_method):
+            cancel_method()
+
+
 def send_video(
     input_path: Path,
     output_path: Optional[Path],
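Because every method resolves the wrapped object with `getattr`, `StreamingJob` can adapt anything job-shaped, which is what makes the client code below testable. A small illustrative fake:

```python
class FakeJob:
    """Duck-typed stand-in for a gradio_client job (illustrative only)."""

    communicator = None  # no async channel, so polling will be used

    def status(self):
        return "running"

    def outputs(self):
        return [("video.mp4", "log", "summary", None)]


job = StreamingJob(FakeJob())
assert job.supports_streaming is False      # forces the polling fallback
assert job.status() == "running"
assert job.outputs()[-1][0] == "video.mp4"
job.cancel()  # FakeJob has no cancel() method, so this is silently a no-op
```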
@@ -35,6 +84,10 @@ def send_video(
     progress_callback: Optional[
         Callable[[str, Optional[int], Optional[int], str], None]
     ] = None,
+    client_factory: Optional[Callable[[str], Client]] = None,
+    job_factory: Optional[
+        Callable[[Client, Tuple[Any, ...], dict[str, Any]], Any]
+    ] = None,
 ) -> Tuple[Path, str, str]:
     """Upload *input_path* to the Gradio server and download the processed video.
 
@@ -45,15 +98,23 @@
     if not input_path.exists():
         raise FileNotFoundError(f"Input file does not exist: {input_path}")
 
-    client = Client(server_url)
-    job = client.submit(
+    client_builder = client_factory or Client
+    client = client_builder(server_url)
+    submit_args: Tuple[Any, ...] = (
         gradio_file(str(input_path)),
         bool(small),
         silent_threshold,
         sounded_speed,
         silent_speed,
-        api_name="/process_video",
     )
+    submit_kwargs: dict[str, Any] = {"api_name": "/process_video"}
+
+    if job_factory is not None:
+        job = job_factory(client, submit_args, submit_kwargs)
+    else:
+        job = client.submit(*submit_args, **submit_kwargs)
+
+    streaming_job = StreamingJob(job)
 
     cancelled = False
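The two optional factories turn the network boundary into an injectable seam: `client_factory` controls how the `Client` is built, and `job_factory` receives exactly the positional and keyword arguments that would otherwise go to `client.submit`. A sketch of a recording factory for tests (names are illustrative):

```python
from typing import Any, Tuple

captured: dict[str, Any] = {}


def recording_job_factory(
    client: Any, submit_args: Tuple[Any, ...], submit_kwargs: dict[str, Any]
) -> Any:
    """Capture the submission before delegating (test-only sketch)."""
    captured["args"] = submit_args
    captured["kwargs"] = submit_kwargs  # e.g. {"api_name": "/process_video"}
    return client.submit(*submit_args, **submit_kwargs)


# A test would pass both seams to send_video, e.g.:
#   send_video(..., client_factory=FakeClient, job_factory=recording_job_factory)
# while production calls omit them and fall back to Client(...) and client.submit(...).
```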
@@ -62,7 +123,7 @@
         if should_cancel and should_cancel():
             if not cancelled:
                 with suppress(Exception):
-                    job.cancel()
+                    streaming_job.cancel()
                 cancelled = True
             raise ProcessingAborted("Remote processing cancelled by user.")
 
@@ -85,7 +146,7 @@
     if should_cancel is not None:
         stream_kwargs["cancel_callback"] = _cancel_if_requested
     consumed_stream = _stream_job_updates(
-        job,
+        streaming_job,
         _emit_new_lines,
         **stream_kwargs,
     )
@@ -190,7 +251,7 @@
 
 
 async def _pump_job_updates(
-    job,
+    job: StreamingJob,
     emit_log: Callable[[str], None],
     progress_callback: Optional[
         Callable[[str, Optional[int], Optional[int], str], None]
@@ -199,7 +260,7 @@ async def _pump_job_updates(
 ) -> None:
     """Consume asynchronous updates from *job* and emit logs and progress."""
 
-    async for update in job:  # type: ignore[async-for]
+    async for update in job.async_iter_updates():
         if cancel_callback:
             cancel_callback()
         update_type = getattr(update, "type", "status")
@@ -242,15 +303,18 @@
 ) -> None:
     """Poll *job* for outputs and status updates when async streaming is unavailable."""
 
+    streaming_job = job if isinstance(job, StreamingJob) else StreamingJob(job)
+    raw_job = streaming_job.raw
+
     while True:
         if cancel_callback:
             cancel_callback()
-        if job.done():
+        if hasattr(raw_job, "done") and raw_job.done():
             break
 
         status: Optional[StatusUpdate] = None
         with suppress(Exception):
-            status = job.status()  # type: ignore[assignment]
+            status = streaming_job.status()  # type: ignore[assignment]
 
         if status is not None:
             if progress_callback:
@@ -268,7 +332,7 @@
 
         outputs = []
         with suppress(Exception):
-            outputs = job.outputs()
+            outputs = streaming_job.outputs()
         if outputs:
             latest = outputs[-1]
             if isinstance(latest, (list, tuple)) and len(latest) == 4:
@@ -280,7 +344,7 @@
 
 
 def _stream_job_updates(
-    job,
+    job: StreamingJob,
     emit_log: Callable[[str], None],
     *,
     progress_callback: Optional[
@@ -294,8 +358,7 @@ def _stream_job_updates(
     generator-based fallback should be used.
     """
 
-    communicator = getattr(job, "communicator", None)
-    if communicator is None:
+    if not job.supports_streaming:
         return False
 
     try:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: talks-reducer
-Version: 0.7.2
+Version: 0.8.0
 Summary: CLI for speeding up long-form talks by removing silence
 Author: Talks Reducer Maintainers
 License-Expression: MIT
@@ -26,7 +26,7 @@ Requires-Dist: bump-my-version>=0.5.0; extra == "dev"
 Requires-Dist: pyinstaller>=6.4.0; extra == "dev"
 Dynamic: license-file
 
-# Talks Reducer
+# Talks Reducer [![Coverage Status](https://coveralls.io/repos/github/popstas/talks-reducer/badge.svg?branch=master)](https://coveralls.io/github/popstas/talks-reducer?branch=master)
 
 Talks Reducer shortens long-form presentations by removing silent gaps and optionally re-encoding them to smaller files. The
 project was renamed from **jumpcutter** to emphasize its focus on conference talks and screencasts.