talks-reducer 0.6.1-py3-none-any.whl → 0.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
talks_reducer/server.py CHANGED
@@ -11,7 +11,7 @@ from contextlib import AbstractContextManager, suppress
 from pathlib import Path
 from queue import SimpleQueue
 from threading import Thread
-from typing import Callable, Iterator, Optional, Sequence
+from typing import Callable, Iterator, Optional, Sequence, cast
 
 import gradio as gr
 
@@ -144,8 +144,14 @@ class GradioProgressReporter(SignalProgressReporter):
         self._progress_callback(bounded_current, total_value, display_desc)
 
 
-_FAVICON_PATH = Path(__file__).resolve().parent.parent / "docs" / "assets" / "icon.ico"
-_FAVICON_PATH_STR = str(_FAVICON_PATH) if _FAVICON_PATH.exists() else None
+_FAVICON_CANDIDATES = (
+    Path(__file__).resolve().parent / "resources" / "icons" / "icon.ico",
+    Path(__file__).resolve().parent.parent / "docs" / "assets" / "icon.ico",
+)
+_FAVICON_PATH: Optional[Path] = next(
+    (path for path in _FAVICON_CANDIDATES if path.exists()), None
+)
+_FAVICON_PATH_STR = str(_FAVICON_PATH) if _FAVICON_PATH else None
 _WORKSPACES: list[Path] = []
 
 
@@ -242,6 +248,9 @@ def _format_summary(result: ProcessingResult) -> str:
 def process_video(
     file_path: Optional[str],
     small_video: bool,
+    silent_threshold: Optional[float] = None,
+    sounded_speed: Optional[float] = None,
+    silent_speed: Optional[float] = None,
     progress: Optional[gr.Progress] = gr.Progress(track_tqdm=False),
 ) -> Iterator[tuple[Optional[str], str, str, Optional[str]]]:
     """Run the Talks Reducer pipeline for a single uploaded file."""
@@ -261,7 +270,7 @@ def process_video(
     if progress is not None:
 
         def _callback(current: int, total: int, desc: str) -> None:
-            progress(current, total=total, desc=desc)
+            events.put(("progress", (current, total, desc)))
 
         progress_callback = _callback
 
@@ -275,11 +284,20 @@ def process_video(
         log_callback=_log_callback,
     )
 
+    option_kwargs: dict[str, float] = {}
+    if silent_threshold is not None:
+        option_kwargs["silent_threshold"] = float(silent_threshold)
+    if sounded_speed is not None:
+        option_kwargs["sounded_speed"] = float(sounded_speed)
+    if silent_speed is not None:
+        option_kwargs["silent_speed"] = float(silent_speed)
+
     options = ProcessingOptions(
         input_file=input_path,
         output_file=output_file,
         temp_folder=temp_folder,
         small=small_video,
+        **option_kwargs,
     )
 
     def _worker() -> None:
@@ -317,6 +335,11 @@ def process_video(
                 gr.update(),
                 gr.update(),
             )
+        elif kind == "progress":
+            if progress is not None:
+                current, total, desc = cast(tuple[int, int, str], payload)
+                percent = current / total if total > 0 else 0
+                progress(percent, total=total, desc=desc)
         elif kind == "result":
             final_result = payload  # type: ignore[assignment]
         elif kind == "error":
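
Taken together, the progress hunks above change the threading model: instead of calling `progress(...)` from the worker thread, the callback now enqueues a `("progress", (current, total, desc))` event, and the generator that drives the Gradio UI drains the queue and converts the counts into a fraction between 0 and 1. A minimal standalone sketch of that queue-based pattern (names and values here are illustrative, not the module's internals):

```python
from queue import SimpleQueue
from threading import Thread

events: SimpleQueue = SimpleQueue()

def worker() -> None:
    # Stand-in for the processing pipeline: emit progress ticks, then a result.
    for current in range(1, 4):
        events.put(("progress", (current, 3, "Encoding")))
    events.put(("result", "output.mp4"))

Thread(target=worker, daemon=True).start()

while True:
    kind, payload = events.get()
    if kind == "progress":
        current, total, desc = payload
        percent = current / total if total > 0 else 0
        print(f"{desc}: {percent:.0%}")  # the server forwards this fraction to gr.Progress
    elif kind == "result":
        print("done:", payload)
        break
```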
@@ -365,14 +388,39 @@ def build_interface() -> gr.Blocks:
             """.strip()
         )
 
-        with gr.Row():
+        with gr.Column():
             file_input = gr.File(
                 label="Video file",
                 file_types=["video"],
                 type="filepath",
             )
+
+        with gr.Row():
             small_checkbox = gr.Checkbox(label="Small video", value=True)
 
+        with gr.Column():
+            silent_speed_input = gr.Slider(
+                minimum=1.0,
+                maximum=10.0,
+                value=4.0,
+                step=0.1,
+                label="Silent speed",
+            )
+            sounded_speed_input = gr.Slider(
+                minimum=0.5,
+                maximum=3.0,
+                value=1.0,
+                step=0.05,
+                label="Sounded speed",
+            )
+            silent_threshold_input = gr.Slider(
+                minimum=0.0,
+                maximum=1.0,
+                value=0.05,
+                step=0.01,
+                label="Silent threshold",
+            )
+
         video_output = gr.Video(label="Processed video")
         summary_output = gr.Markdown()
         download_output = gr.File(label="Download processed file", interactive=False)
@@ -380,7 +428,13 @@ def build_interface() -> gr.Blocks:
 
         file_input.upload(
             process_video,
-            inputs=[file_input, small_checkbox],
+            inputs=[
+                file_input,
+                small_checkbox,
+                silent_threshold_input,
+                sounded_speed_input,
+                silent_speed_input,
+            ],
             outputs=[video_output, log_output, summary_output, download_output],
             queue=True,
             api_name="process_video",
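
With the slider wiring above, the `/process_video` Gradio endpoint now takes five inputs in the order listed in `file_input.upload`; the three new timing values default to `None`, in which case the server falls back to the `ProcessingOptions` defaults. A hedged sketch of calling the extended endpoint with `gradio_client` (the URL, file name, and values are illustrative assumptions, not part of the diff):

```python
from gradio_client import Client, file

client = Client("http://localhost:9005")  # assumed server address, as in the README example

# Inputs follow the order wired into file_input.upload():
# video file, small_video, silent_threshold, sounded_speed, silent_speed.
video_path, log_text, summary, download_path = client.predict(
    file("demo.mp4"),  # hypothetical local recording
    True,              # "Small video" preset
    0.05,              # silent threshold (slider default above)
    1.0,               # sounded speed
    4.0,               # silent speed
    api_name="/process_video",
)
print(summary)
```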
talks_reducer/server_tray.py CHANGED
@@ -123,14 +123,12 @@ def _iter_icon_candidates() -> Iterator[Path]:
     if candidate_root not in expanded_roots:
         expanded_roots.append(candidate_root)
 
-    if sys.platform == "win32":
-        icon_names = ("icon.ico", "icon.png")
-    else:
-        icon_names = ("icon.png", "icon.ico")
+    icon_names = ("icon.ico", "icon.png") if sys.platform == "win32" else ("icon.png", "icon.ico")
     relative_paths = (
+        Path("talks_reducer") / "resources" / "icons",
+        Path("talks_reducer") / "assets",
         Path("docs") / "assets",
         Path("assets"),
-        Path("talks_reducer") / "assets",
         Path(""),
     )
 
@@ -409,7 +407,7 @@ class _ServerTrayApplication:
         try:
             LOGGER.info("Launching Talks Reducer GUI via %s", sys.executable)
             process = subprocess.Popen(
-                [sys.executable, "-m", "talks_reducer.gui", "--no-tray"]
+                [sys.executable, "-m", "talks_reducer.gui"]
             )
         except Exception as exc:  # pragma: no cover - platform specific
             LOGGER.error("Failed to launch Talks Reducer GUI: %s", exc)
talks_reducer/service_client.py CHANGED
@@ -14,6 +14,11 @@ from gradio_client import Client
 from gradio_client import file as gradio_file
 from gradio_client.client import Status, StatusUpdate
 
+try:
+    from .pipeline import ProcessingAborted
+except ImportError:  # pragma: no cover - allow running as script
+    from talks_reducer.pipeline import ProcessingAborted
+
 
 def send_video(
     input_path: Path,
@@ -21,13 +26,21 @@ def send_video(
     server_url: str,
     small: bool = False,
     *,
+    silent_threshold: Optional[float] = None,
+    sounded_speed: Optional[float] = None,
+    silent_speed: Optional[float] = None,
     log_callback: Optional[Callable[[str], None]] = None,
     stream_updates: bool = False,
+    should_cancel: Optional[Callable[[], bool]] = None,
     progress_callback: Optional[
         Callable[[str, Optional[int], Optional[int], str], None]
     ] = None,
 ) -> Tuple[Path, str, str]:
-    """Upload *input_path* to the Gradio server and download the processed video."""
+    """Upload *input_path* to the Gradio server and download the processed video.
+
+    When *should_cancel* returns ``True`` the remote job is cancelled and a
+    :class:`ProcessingAborted` exception is raised.
+    """
 
     if not input_path.exists():
         raise FileNotFoundError(f"Input file does not exist: {input_path}")
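
For callers, the widened `send_video` signature means the timing options and a cancellation hook can be passed straight through to the server. A hedged usage sketch (the `threading.Event` wiring, file name, and the `output_path` keyword are assumptions for illustration; the parameter names shown in the hunk and `ProcessingAborted` come from the diff):

```python
import threading
from pathlib import Path

from talks_reducer.pipeline import ProcessingAborted
from talks_reducer.service_client import send_video

cancel_event = threading.Event()  # e.g. set from a Stop button in the GUI

try:
    result = send_video(
        input_path=Path("demo.mp4"),
        output_path=None,  # assumed parameter; the body falls back to the current directory
        server_url="http://localhost:9005",
        small=True,
        silent_threshold=0.05,
        sounded_speed=1.0,
        silent_speed=4.0,
        should_cancel=cancel_event.is_set,  # polled while streaming/polling and before download
    )
    # Annotated return type is Tuple[Path, str, str]: the downloaded file plus log and summary text.
except ProcessingAborted:
    print("Remote processing was cancelled.")
```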
@@ -36,9 +49,23 @@ def send_video(
     job = client.submit(
         gradio_file(str(input_path)),
         bool(small),
+        silent_threshold,
+        sounded_speed,
+        silent_speed,
         api_name="/process_video",
     )
 
+    cancelled = False
+
+    def _cancel_if_requested() -> None:
+        nonlocal cancelled
+        if should_cancel and should_cancel():
+            if not cancelled:
+                with suppress(Exception):
+                    job.cancel()
+                cancelled = True
+            raise ProcessingAborted("Remote processing cancelled by user.")
+
     printed_lines = 0
 
     def _emit_new_lines(log_text: str) -> None:
@@ -54,21 +81,31 @@ def send_video(
     consumed_stream = False
 
     if stream_updates:
+        stream_kwargs: dict[str, object] = {"progress_callback": progress_callback}
+        if should_cancel is not None:
+            stream_kwargs["cancel_callback"] = _cancel_if_requested
         consumed_stream = _stream_job_updates(
             job,
             _emit_new_lines,
-            progress_callback=progress_callback,
+            **stream_kwargs,
         )
 
     if not consumed_stream:
         for output in job:
+            _cancel_if_requested()
             if not isinstance(output, (list, tuple)) or len(output) != 4:
                 continue
             log_text_candidate = output[1] or ""
             if isinstance(log_text_candidate, str):
                 _emit_new_lines(log_text_candidate)
 
-    prediction = job.result()
+    _cancel_if_requested()
+
+    try:
+        prediction = job.result()
+    except Exception:
+        _cancel_if_requested()
+        raise
 
     try:
         video_path, log_text, summary, download_path = prediction
@@ -86,6 +123,8 @@ def send_video(
     if not download_path:
         raise RuntimeError("Server did not return a processed file")
 
+    _cancel_if_requested()
+
     download_source = Path(str(download_path))
     if output_path is None:
         destination = Path.cwd() / download_source.name
@@ -156,10 +195,13 @@ async def _pump_job_updates(
     progress_callback: Optional[
         Callable[[str, Optional[int], Optional[int], str], None]
     ],
+    cancel_callback: Optional[Callable[[], None]] = None,
 ) -> None:
     """Consume asynchronous updates from *job* and emit logs and progress."""
 
     async for update in job:  # type: ignore[async-for]
+        if cancel_callback:
+            cancel_callback()
         update_type = getattr(update, "type", "status")
         if update_type == "output":
             outputs = getattr(update, "outputs", None) or []
@@ -195,11 +237,14 @@ def _poll_job_updates(
         Callable[[str, Optional[int], Optional[int], str], None]
     ],
     *,
+    cancel_callback: Optional[Callable[[], None]] = None,
     interval: float = 0.25,
 ) -> None:
     """Poll *job* for outputs and status updates when async streaming is unavailable."""
 
     while True:
+        if cancel_callback:
+            cancel_callback()
         if job.done():
             break
 
@@ -241,6 +286,7 @@ def _stream_job_updates(
     progress_callback: Optional[
         Callable[[str, Optional[int], Optional[int], str], None]
     ] = None,
+    cancel_callback: Optional[Callable[[], None]] = None,
 ) -> bool:
     """Attempt to stream updates directly from *job*.
 
@@ -258,10 +304,16 @@ def _stream_job_updates(
                 job,
                 emit_log,
                 progress_callback,
+                cancel_callback,
             )
         )
     except RuntimeError:
-        _poll_job_updates(job, emit_log, progress_callback)
+        _poll_job_updates(
+            job,
+            emit_log,
+            progress_callback,
+            cancel_callback=cancel_callback,
+        )
 
     return True
 
talks_reducer-0.7.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: talks-reducer
-Version: 0.6.1
+Version: 0.7.0
 Summary: CLI for speeding up long-form talks by removing silence
 Author: Talks Reducer Maintainers
 License-Expression: MIT
@@ -26,7 +26,8 @@ Requires-Dist: bump-my-version>=0.5.0; extra == "dev"
 Requires-Dist: pyinstaller>=6.4.0; extra == "dev"
 Dynamic: license-file
 
-# Talks Reducer
+# Talks Reducer
+
 Talks Reducer shortens long-form presentations by removing silent gaps and optionally re-encoding them to smaller files. The
 project was renamed from **jumpcutter** to emphasize its focus on conference talks and screencasts.
 
@@ -47,10 +48,10 @@ Go to the [releases page](https://github.com/popstas/talks-reducer/releases) and
 - **Windows** — `talks-reducer-windows-0.4.0.zip`
 - **macOS** — `talks-reducer.app.zip`
 
-> **Troubleshooting:** If launching the bundle (or running `python talks_reducer/gui.py`) prints `macOS 26 (2600) or later required, have instead 16 (1600)!`, make sure you're using a Python build that ships a modern Tk. The stock [python.org 3.13.5 installer](https://www.python.org/downloads/release/python-3135/) includes Tk 8.6 and has been verified to work.
+> **Troubleshooting:** If launching the bundle (or running `python -m talks_reducer.gui`) prints `macOS 26 (2600) or later required, have instead 16 (1600)!`, make sure you're using a Python build that ships a modern Tk. The stock [python.org 3.13.5 installer](https://www.python.org/downloads/release/python-3135/) includes Tk 8.6 and has been verified to work.
 
-When extracted on Windows the bundled `talks-reducer.exe` behaves like the
-`python talks_reducer/gui.py` entry point: double-clicking it launches the GUI
+When extracted on Windows the bundled `talks-reducer.exe` behaves like running
+`python -m talks_reducer.gui`: double-clicking it launches the GUI
 and passing a video file path (for example via *Open with…* or drag-and-drop
 onto the executable) automatically queues that recording for processing.
 
@@ -80,6 +81,10 @@ talks-reducer --url http://localhost:9005 demo.mp4
 talks-reducer --host 192.168.1.42 demo.mp4
 ```
 
+Remote jobs respect the same timing controls as the local CLI. Provide
+`--silent-threshold`, `--sounded-speed`, or `--silent-speed` to tweak how the
+server trims and accelerates segments without falling back to local mode.
+
 Want to see progress as the remote server works? Add `--server-stream` so the
 CLI prints live progress bars and log lines while you wait for the download.
 
@@ -98,6 +103,10 @@ Prefer a lightweight browser interface? Launch the Gradio-powered simple mode wi
 talks-reducer server
 ```
 
+The browser UI mirrors the CLI timing controls with sliders for the silent
+threshold and playback speeds, so you can tune exports without leaving the
+remote workflow.
+
 Want the server to live in your system tray instead of a terminal window? Use:
 
 ```sh
@@ -116,14 +125,15 @@ running Talks Reducer version and includes an **Open GUI**
 item (also triggered by double-clicking the icon) that launches the desktop
 Talks Reducer interface alongside an **Open WebUI** entry that opens the Gradio
 page in your browser. Close the GUI window to return to the tray without
-stopping the server. Launching the GUI directly now starts the tray-backed
-server in the background before the window appears so the icon stays available
-after you close it; add `--no-tray` when running `python -m talks_reducer.gui`
-if you prefer to skip the background server entirely. The tray command itself
-never launches the GUI automatically, so use the menu item (or rerun the GUI
-with `--no-tray`) whenever you want to reopen it. The tray no longer opens a
-browser automatically—pass `--open-browser` if you prefer the web page to
-launch as soon as the server is ready.
+stopping the server. Launch the tray explicitly whenever you need it—either run
+`talks-reducer server-tray` directly or start the GUI with
+`python -m talks_reducer.gui --server` to boot the tray-managed server instead
+of the desktop window. The GUI now runs standalone and no longer spawns the tray
+automatically; the deprecated `--no-tray` flag is ignored for compatibility.
+The tray command itself never launches the GUI automatically, so use the menu
+item (or relaunch the GUI separately) whenever you want to reopen it. The tray
+no longer opens a browser automatically—pass `--open-browser` if you prefer the
+web page to launch as soon as the server is ready.
 
 This opens a local web page featuring a drag-and-drop upload zone, a **Small video** checkbox that mirrors the CLI preset, a live
 progress indicator, and automatic previews of the processed output. The page header and browser tab title include the current
@@ -0,0 +1,29 @@
+talks_reducer/__about__.py,sha256=94GwXssxb0xzy1jnupX-CAEoTnzbW-oWTdZFg_V3R6Q,92
+talks_reducer/__init__.py,sha256=Kzh1hXaw6Vq3DyTqrnJGOq8pn0P8lvaDcsg1bFUjFKk,208
+talks_reducer/__main__.py,sha256=azR_vh8HFPLaOnh-L6gUFWsL67I6iHtbeH5rQhsipGY,299
+talks_reducer/audio.py,sha256=sjHMeY0H9ESG-Gn5BX0wFRBX7sXjWwsgS8u9Vb0bJ88,4396
+talks_reducer/chunks.py,sha256=IpdZxRFPURSG5wP-OQ_p09CVP8wcKwIFysV29zOTSWI,2959
+talks_reducer/cli.py,sha256=MeL5feATcJc0bGPDuW_L3HgepKQUi335zs3HVg47WrE,16474
+talks_reducer/discovery.py,sha256=BJ-iMir65cJMs0u-_EYdknBQT_grvCZaJNOx1xGi2PU,4590
+talks_reducer/ffmpeg.py,sha256=dsHBOBcr5XCSg0q3xmzLOcibBiEdyrXdEQa-ze5vQsM,12551
+talks_reducer/models.py,sha256=a1cHCVTNTJYh9I437CuANiaz5R_s-uECeGyK7WB67HQ,2018
+talks_reducer/pipeline.py,sha256=OGZG_3G1fh6LFQw9NuhnLq7gwJ5YcJ6l76QNWJydD7c,13630
+talks_reducer/progress.py,sha256=Mh43M6VWhjjUv9CI22xfD2EJ_7Aq3PCueqefQ9Bd5-o,4565
+talks_reducer/server.py,sha256=CLcgEyfBjsglSG1VkqiaP-4NsZZklL5o7ayYwnNMqbs,15782
+talks_reducer/server_tray.py,sha256=GBjx7Fr18Uy5O38ZjM5VXR77ou2_AEUqx2wN8MOZuss,23380
+talks_reducer/service_client.py,sha256=8C2v2aNj8UAECfy1pw7oIzCK3Ktx5E6kZoNSYWH-8m8,12656
+talks_reducer/version_utils.py,sha256=TkYrTznVb2JqxFXzVzPd6PEnYP2MH7dxKl1J4-3DjMA,755
+talks_reducer/gui/__init__.py,sha256=UQJtyb87wwZyvauPo0mM_aiau9NAhKbl4ggwJoPCNC0,59870
+talks_reducer/gui/__main__.py,sha256=9YWkGopLypanfMMq_RoQjjpPScTOxA7-biqMhQq-SSM,140
+talks_reducer/gui/discovery.py,sha256=6AXPcFGXqHZNhSBE1O5PyoH_CEMCb0Jk-9JGFwyAuRk,4108
+talks_reducer/gui/layout.py,sha256=rFzNt78sf6TQzHkEBUmINdD5-iJAWkBKHkIo_v5f7iU,17146
+talks_reducer/gui/preferences.py,sha256=ahDLPNIzvL71sw8WvgY9-TV_kaWTl_JTkn1gf2Z1EaA,3531
+talks_reducer/gui/remote.py,sha256=92HebrIo009GgRD7RBriw9yR8sbYHocsPzmjPe4ybhA,12071
+talks_reducer/gui/theme.py,sha256=ueqpPVPOwnLkeHlOlWkUcdcoClJrAqz9LWT79p33Xic,7718
+talks_reducer/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+talks_reducer-0.7.0.dist-info/licenses/LICENSE,sha256=jN17mHNR3e84awmH3AbpWBcBDBzPxEH0rcOFoj1s7sQ,1124
+talks_reducer-0.7.0.dist-info/METADATA,sha256=mVqCz2mmbd4J2BiQ384kY52kQ5bdpGmr_60ca_FAqiA,8810
+talks_reducer-0.7.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+talks_reducer-0.7.0.dist-info/entry_points.txt,sha256=X2pjoh2vWBXXExVWorv1mbA1aTEVP3fyuZH4AixqZK4,208
+talks_reducer-0.7.0.dist-info/top_level.txt,sha256=pJWGcy__LR9JIEKH3QJyFmk9XrIsiFtqvuMNxFdIzDU,14
+talks_reducer-0.7.0.dist-info/RECORD,,
@@ -1,22 +0,0 @@
-talks_reducer/__about__.py,sha256=KW-viuoc2AcVVTHYXfURFg9xsicuBA_X9n4tgL9p2ag,92
-talks_reducer/__init__.py,sha256=Kzh1hXaw6Vq3DyTqrnJGOq8pn0P8lvaDcsg1bFUjFKk,208
-talks_reducer/__main__.py,sha256=azR_vh8HFPLaOnh-L6gUFWsL67I6iHtbeH5rQhsipGY,299
-talks_reducer/audio.py,sha256=sjHMeY0H9ESG-Gn5BX0wFRBX7sXjWwsgS8u9Vb0bJ88,4396
-talks_reducer/chunks.py,sha256=IpdZxRFPURSG5wP-OQ_p09CVP8wcKwIFysV29zOTSWI,2959
-talks_reducer/cli.py,sha256=JH8lvPUyk6jWGcnNRIGJeIh2ZcOvC5CORvj5GLuqq0c,16075
-talks_reducer/discovery.py,sha256=BJ-iMir65cJMs0u-_EYdknBQT_grvCZaJNOx1xGi2PU,4590
-talks_reducer/ffmpeg.py,sha256=dsHBOBcr5XCSg0q3xmzLOcibBiEdyrXdEQa-ze5vQsM,12551
-talks_reducer/gui.py,sha256=mTB4m9LxqqTAaHoNf1QZZ4KfHiSMk0C7SJ5BDhqNR3Q,89777
-talks_reducer/models.py,sha256=6Q_8rmHLyImXp88D4B7ptTbFaH_xXa_yxs8A2dypz2Y,2004
-talks_reducer/pipeline.py,sha256=naAP8gli9MahoxVdla2tRn2vdDuBBU5ywWkYfZLkMcE,12211
-talks_reducer/progress.py,sha256=Mh43M6VWhjjUv9CI22xfD2EJ_7Aq3PCueqefQ9Bd5-o,4565
-talks_reducer/server.py,sha256=IzPe0qkZTs8kqoc9gtGDSWtwN2CTWHVcsYzQah-W3cU,13977
-talks_reducer/server_tray.py,sha256=9jqYfjBpXj7_V_ZlVc7oSDKYRPpG7sOGoxuKvSGEQGM,23373
-talks_reducer/service_client.py,sha256=JnhQhRxVwrGo9eUlduoZ6f_YYyXvjU7hF8UTNprH7TM,10984
-talks_reducer/version_utils.py,sha256=TkYrTznVb2JqxFXzVzPd6PEnYP2MH7dxKl1J4-3DjMA,755
-talks_reducer-0.6.1.dist-info/licenses/LICENSE,sha256=jN17mHNR3e84awmH3AbpWBcBDBzPxEH0rcOFoj1s7sQ,1124
-talks_reducer-0.6.1.dist-info/METADATA,sha256=iTbxOTuZMnT2UG5KmwKDpKuy555CtsTSps7KSPAfJc0,8326
-talks_reducer-0.6.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-talks_reducer-0.6.1.dist-info/entry_points.txt,sha256=X2pjoh2vWBXXExVWorv1mbA1aTEVP3fyuZH4AixqZK4,208
-talks_reducer-0.6.1.dist-info/top_level.txt,sha256=pJWGcy__LR9JIEKH3QJyFmk9XrIsiFtqvuMNxFdIzDU,14
-talks_reducer-0.6.1.dist-info/RECORD,,