talks_reducer-0.5.3-py3-none-any.whl → talks_reducer-0.6.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- talks_reducer/__about__.py +1 -1
- talks_reducer/cli.py +270 -7
- talks_reducer/discovery.py +149 -0
- talks_reducer/gui.py +846 -143
- talks_reducer/pipeline.py +1 -1
- talks_reducer/server.py +104 -23
- talks_reducer/server_tray.py +215 -16
- talks_reducer/service_client.py +258 -4
- {talks_reducer-0.5.3.dist-info → talks_reducer-0.6.0.dist-info}/METADATA +28 -1
- talks_reducer-0.6.0.dist-info/RECORD +21 -0
- talks_reducer-0.5.3.dist-info/RECORD +0 -20
- {talks_reducer-0.5.3.dist-info → talks_reducer-0.6.0.dist-info}/WHEEL +0 -0
- {talks_reducer-0.5.3.dist-info → talks_reducer-0.6.0.dist-info}/entry_points.txt +0 -0
- {talks_reducer-0.5.3.dist-info → talks_reducer-0.6.0.dist-info}/licenses/LICENSE +0 -0
- {talks_reducer-0.5.3.dist-info → talks_reducer-0.6.0.dist-info}/top_level.txt +0 -0
talks_reducer/service_client.py
CHANGED
@@ -3,12 +3,16 @@
 from __future__ import annotations
 
 import argparse
+import asyncio
 import shutil
+import time
+from contextlib import suppress
 from pathlib import Path
-from typing import Optional, Sequence, Tuple
+from typing import Callable, Optional, Sequence, Tuple
 
 from gradio_client import Client
 from gradio_client import file as gradio_file
+from gradio_client.client import Status, StatusUpdate
 
 
 def send_video(
@@ -16,6 +20,12 @@ def send_video(
     output_path: Optional[Path],
     server_url: str,
     small: bool = False,
+    *,
+    log_callback: Optional[Callable[[str], None]] = None,
+    stream_updates: bool = False,
+    progress_callback: Optional[
+        Callable[[str, Optional[int], Optional[int], str], None]
+    ] = None,
 ) -> Tuple[Path, str, str]:
     """Upload *input_path* to the Gradio server and download the processed video."""
 
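For orientation, here is a minimal sketch of how a caller might drive the new keyword-only parameters of `send_video`; the callback bodies and file names are illustrative, not part of the package:

```python
from pathlib import Path
from typing import Optional

from talks_reducer.service_client import send_video

def on_log(line: str) -> None:
    # Receives each new server log line exactly once.
    print(f"[server] {line}")

def on_progress(desc: str, current: Optional[int], total: Optional[int], unit: str) -> None:
    # Matches the Callable[[str, Optional[int], Optional[int], str], None] shape.
    print(f"{desc}: {current}/{total} {unit}")

destination, summary, log_text = send_video(
    input_path=Path("talk.mp4"),
    output_path=None,               # derived automatically when omitted (logic not shown here)
    server_url="http://localhost:9005",
    small=True,
    log_callback=on_log,            # incremental log lines
    stream_updates=True,            # opt in to live status streaming
    progress_callback=on_progress,  # consulted while streaming
)
```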
@@ -23,17 +33,56 @@ def send_video(
         raise FileNotFoundError(f"Input file does not exist: {input_path}")
 
     client = Client(server_url)
-
+    job = client.submit(
         gradio_file(str(input_path)),
         bool(small),
         api_name="/process_video",
     )
 
+    printed_lines = 0
+
+    def _emit_new_lines(log_text: str) -> None:
+        nonlocal printed_lines
+        if log_callback is None or not log_text:
+            return
+        lines = log_text.splitlines()
+        if printed_lines < len(lines):
+            for line in lines[printed_lines:]:
+                log_callback(line)
+            printed_lines = len(lines)
+
+    consumed_stream = False
+
+    if stream_updates:
+        consumed_stream = _stream_job_updates(
+            job,
+            _emit_new_lines,
+            progress_callback=progress_callback,
+        )
+
+    if not consumed_stream:
+        for output in job:
+            if not isinstance(output, (list, tuple)) or len(output) != 4:
+                continue
+            log_text_candidate = output[1] or ""
+            if isinstance(log_text_candidate, str):
+                _emit_new_lines(log_text_candidate)
+
+    prediction = job.result()
+
     try:
-
+        video_path, log_text, summary, download_path = prediction
     except (TypeError, ValueError) as exc:  # pragma: no cover - defensive
         raise RuntimeError("Unexpected response from server") from exc
 
+    if isinstance(log_text, str):
+        _emit_new_lines(log_text)
+    else:
+        log_text = ""
+
+    if not download_path:
+        download_path = video_path
+
     if not download_path:
         raise RuntimeError("Server did not return a processed file")
 
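Gradio reports the log field cumulatively on every update, so the `_emit_new_lines` closure above tracks `printed_lines` to forward each line only once. The same dedup idea restated as a standalone sketch (not an import from the package):

```python
def make_emitter(sink):
    """Build a callable that forwards only log lines not emitted before."""
    printed = 0

    def emit(log_text: str) -> None:
        nonlocal printed
        lines = log_text.splitlines()
        for line in lines[printed:]:
            sink(line)
        printed = len(lines)

    return emit

emit = make_emitter(print)
emit("encoding audio")                  # prints: encoding audio
emit("encoding audio\nmuxing video")    # prints only: muxing video
```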
@@ -49,9 +98,174 @@ def send_video(
     if download_source.resolve() != destination.resolve():
         shutil.copy2(download_source, destination)
 
+    if not isinstance(summary, str):
+        summary = ""
+    if not isinstance(log_text, str):
+        log_text = ""
+
     return destination, summary, log_text
 
 
+def _coerce_int(value: object) -> Optional[int]:
+    """Return *value* as an ``int`` when possible."""
+
+    try:
+        if value is None:
+            return None
+        return int(value)
+    except (TypeError, ValueError):
+        return None
+
+
+def _emit_progress_update(
+    callback: Callable[[str, Optional[int], Optional[int], str], None],
+    unit: object,
+) -> None:
+    """Normalize a progress unit and forward it to *callback*."""
+
+    if unit is None:
+        return
+
+    if hasattr(unit, "__dict__"):
+        data = unit
+        desc = getattr(data, "desc", None)
+        length = getattr(data, "length", None)
+        index = getattr(data, "index", None)
+        progress = getattr(data, "progress", None)
+        unit_name = getattr(data, "unit", None)
+    elif isinstance(unit, dict):
+        desc = unit.get("desc")
+        length = unit.get("length")
+        index = unit.get("index")
+        progress = unit.get("progress")
+        unit_name = unit.get("unit")
+    else:
+        return
+
+    total = _coerce_int(length)
+    current = _coerce_int(index)
+    if current is None and isinstance(progress, (int, float)) and total:
+        current = int(progress / total)
+
+    callback(desc or "Processing", progress, total, str(unit_name or ""))
+
+
+async def _pump_job_updates(
+    job,
+    emit_log: Callable[[str], None],
+    progress_callback: Optional[
+        Callable[[str, Optional[int], Optional[int], str], None]
+    ],
+) -> None:
+    """Consume asynchronous updates from *job* and emit logs and progress."""
+
+    async for update in job:  # type: ignore[async-for]
+        update_type = getattr(update, "type", "status")
+        if update_type == "output":
+            outputs = getattr(update, "outputs", None) or []
+            if isinstance(outputs, (list, tuple)) and len(outputs) == 4:
+                log_text_candidate = outputs[1] or ""
+                if isinstance(log_text_candidate, str):
+                    emit_log(log_text_candidate)
+            if getattr(update, "final", False):
+                break
+            continue
+
+        status_update: StatusUpdate = update  # type: ignore[assignment]
+        log_entry = getattr(status_update, "log", None)
+        if log_entry:
+            message = (
+                log_entry[0] if isinstance(log_entry, (list, tuple)) else log_entry
+            )
+            if isinstance(message, str):
+                emit_log(message)
+
+        if progress_callback and status_update.progress_data:
+            for unit in status_update.progress_data:
+                _emit_progress_update(progress_callback, unit)
+
+        if status_update.code in {Status.FINISHED, Status.CANCELLED}:
+            break
+
+
+def _poll_job_updates(
+    job,
+    emit_log: Callable[[str], None],
+    progress_callback: Optional[
+        Callable[[str, Optional[int], Optional[int], str], None]
+    ],
+    *,
+    interval: float = 0.25,
+) -> None:
+    """Poll *job* for outputs and status updates when async streaming is unavailable."""
+
+    while True:
+        if job.done():
+            break
+
+        status: Optional[StatusUpdate] = None
+        with suppress(Exception):
+            status = job.status()  # type: ignore[assignment]
+
+        if status is not None:
+            if progress_callback:
+                progress_data = getattr(status, "progress_data", None)
+                if progress_data:
+                    for unit in progress_data:
+                        _emit_progress_update(progress_callback, unit)
+            log_entry = getattr(status, "log", None)
+            if log_entry:
+                message = (
+                    log_entry[0] if isinstance(log_entry, (list, tuple)) else log_entry
+                )
+                if isinstance(message, str):
+                    emit_log(message)
+
+        outputs = []
+        with suppress(Exception):
+            outputs = job.outputs()
+        if outputs:
+            latest = outputs[-1]
+            if isinstance(latest, (list, tuple)) and len(latest) == 4:
+                log_text_candidate = latest[1] or ""
+                if isinstance(log_text_candidate, str):
+                    emit_log(log_text_candidate)
+
+        time.sleep(interval)
+
+
+def _stream_job_updates(
+    job,
+    emit_log: Callable[[str], None],
+    *,
+    progress_callback: Optional[
+        Callable[[str, Optional[int], Optional[int], str], None]
+    ] = None,
+) -> bool:
+    """Attempt to stream updates directly from *job*.
+
+    Returns ``True`` when streaming occurred, ``False`` when the legacy
+    generator-based fallback should be used.
+    """
+
+    communicator = getattr(job, "communicator", None)
+    if communicator is None:
+        return False
+
+    try:
+        asyncio.run(
+            _pump_job_updates(
+                job,
+                emit_log,
+                progress_callback,
+            )
+        )
+    except RuntimeError:
+        _poll_job_updates(job, emit_log, progress_callback)
+
+    return True
+
+
 def _build_parser() -> argparse.ArgumentParser:
     parser = argparse.ArgumentParser(
         description="Send a video to a running talks-reducer server and download the result.",
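One detail worth calling out in `_stream_job_updates` above: `asyncio.run` raises `RuntimeError` when called from a thread whose event loop is already running, and that is the signal used to drop down to the synchronous poller. A stripped-down sketch of the pattern, with stand-in names:

```python
import asyncio

async def consume_async() -> None:
    # Stand-in for _pump_job_updates: async-iterate the job's updates.
    await asyncio.sleep(0)

def poll_fallback() -> None:
    # Stand-in for _poll_job_updates: periodic job.status()/job.outputs() checks.
    pass

def stream_or_poll() -> None:
    try:
        asyncio.run(consume_async())
    except RuntimeError:
        # asyncio.run refuses to start while another loop is running,
        # e.g. when the caller is itself inside an async framework.
        poll_fallback()

stream_or_poll()
```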
@@ -78,6 +292,11 @@ def _build_parser() -> argparse.ArgumentParser:
         action="store_true",
         help="Print the server log after processing completes.",
     )
+    parser.add_argument(
+        "--stream",
+        action="store_true",
+        help="Stream remote progress updates while waiting for the result.",
+    )
     return parser
 
 
@@ -85,16 +304,51 @@ def main(argv: Optional[Sequence[str]] = None) -> None:
     parser = _build_parser()
     args = parser.parse_args(argv)
 
+    printed_log_header = False
+
+    def _stream(line: str) -> None:
+        nonlocal printed_log_header
+        if not printed_log_header:
+            print("\nServer log:", flush=True)
+            printed_log_header = True
+        print(line, flush=True)
+
+    progress_state: dict[str, tuple[Optional[int], Optional[int], str]] = {}
+
+    def _progress(
+        desc: str, current: Optional[int], total: Optional[int], unit: str
+    ) -> None:
+        key = desc or "Processing"
+        state = (current, total, unit)
+        if progress_state.get(key) == state:
+            return
+        progress_state[key] = state
+
+        parts: list[str] = []
+        if current is not None and total and total > 0:
+            percent = (current / total) * 100
+            parts.append(f"{current}/{total}")
+            parts.append(f"{percent:.1f}%")
+        elif current is not None:
+            parts.append(str(current))
+        if unit:
+            parts.append(unit)
+        message = " ".join(parts).strip()
+        print(f"{key}: {message or 'update'}", flush=True)
+
     destination, summary, log_text = send_video(
         input_path=args.input.expanduser(),
         output_path=args.output.expanduser() if args.output else None,
         server_url=args.server,
         small=args.small,
+        log_callback=_stream if args.print_log else None,
+        stream_updates=args.stream,
+        progress_callback=_progress if args.stream else None,
     )
 
     print(summary)
     print(f"Saved processed video to {destination}")
-    if args.print_log:
+    if args.print_log and log_text.strip() and not printed_log_header:
         print("\nServer log:\n" + log_text)
 
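Taken together, a remote run with live output from this client could look like the invocation below. The `--stream` flag is new in this diff; the module-style invocation and the spellings of the pre-existing options (`--server`, `--print-log`, positional input) are inferred from the argparse destinations above rather than shown verbatim:

```sh
python -m talks_reducer.service_client \
    --server http://localhost:9005 \
    --stream --print-log \
    talk.mp4
```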
{talks_reducer-0.5.3.dist-info → talks_reducer-0.6.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: talks-reducer
-Version: 0.5.3
+Version: 0.6.0
 Summary: CLI for speeding up long-form talks by removing silence
 Author: Talks Reducer Maintainers
 License-Expression: MIT
@@ -70,6 +70,19 @@ Example CLI usage:
 talks-reducer --small input.mp4
 ```
 
+Need to offload work to a remote Talks Reducer server? Pass `--url` with the
+server address and the CLI will upload the input, wait for processing to finish,
+and download the rendered video. You can also provide `--host` to expand to the
+default Talks Reducer port (`http://<host>:9005`):
+
+```sh
+talks-reducer --url http://localhost:9005 demo.mp4
+talks-reducer --host 192.168.1.42 demo.mp4
+```
+
+Want to see progress as the remote server works? Add `--server-stream` so the
+CLI prints live progress bars and log lines while you wait for the download.
+
 ### Speech detection
 
 Talks Reducer now relies on its built-in volume thresholding to detect speech. Adjust `--silent_threshold` if you need to fine-tune when segments count as silence. Dropping the optional Silero VAD integration keeps the install lightweight and avoids pulling in PyTorch.
@@ -91,6 +104,10 @@ Want the server to live in your system tray instead of a terminal window? Use:
 talks-reducer server-tray
 ```
 
+Bundled Windows builds include the same behaviour: run
+`talks-reducer.exe --server` to launch the tray-managed server directly from the
+desktop shortcut without opening the GUI first.
+
 Pass `--debug` to print verbose logs about the tray icon lifecycle, and
 `--tray-mode pystray-detached` to try pystray's alternate detached runner. If
 the icon backend refuses to appear, fall back to `--tray-mode headless` to keep
@@ -111,6 +128,16 @@ This opens a local web page featuring a drag-and-drop upload zone, a **Small vid
 progress indicator, and automatic previews of the processed output. Once the job completes you can inspect the resulting compression
 ratio and download the rendered video directly from the page.
 
+The desktop GUI mirrors this behaviour. Open **Advanced** settings to provide a
+server URL and click **Discover** to scan your local network for Talks Reducer
+instances listening on port `9005`. The button now updates with the discovery
+progress, showing the scanned/total host count as `scanned / total`. A new
+**Processing mode** toggle lets you decide whether work stays local or uploads
+to the configured server—the **Remote** option becomes available as soon as a
+URL is supplied. Leave the toggle on **Local** to keep rendering on this
+machine even if a server is saved; switch to **Remote** to hand jobs off while
+the GUI downloads the finished files automatically.
+
 ### Uploading and retrieving a processed video
 
 1. Open the printed `http://localhost:<port>` address (the default port is `9005`).
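The `talks_reducer/discovery.py` module behind the **Discover** button is new in 0.6.0 (+149 lines) but its source is not part of this diff excerpt. Going only by the behaviour described above, probing local hosts for port `9005` and reporting a `scanned / total` count, a scan of this kind can be sketched as follows; every name below is illustrative rather than the package's actual API:

```python
import socket
from concurrent.futures import ThreadPoolExecutor

PORT = 9005  # default Talks Reducer server port, per the README above

def probe(host: str, timeout: float = 0.5) -> bool:
    """Return True when *host* accepts a TCP connection on PORT."""
    try:
        with socket.create_connection((host, PORT), timeout=timeout):
            return True
    except OSError:
        return False

def scan(prefix: str = "192.168.1.") -> list[str]:
    """Probe a /24 network and collect hosts that answer on PORT."""
    candidates = [f"{prefix}{i}" for i in range(1, 255)]
    with ThreadPoolExecutor(max_workers=64) as pool:
        results = list(pool.map(probe, candidates))
    return [host for host, is_open in zip(candidates, results) if is_open]
```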
talks_reducer-0.6.0.dist-info/RECORD
ADDED
@@ -0,0 +1,21 @@
+talks_reducer/__about__.py,sha256=T9hVPyc5xZOC-_kFZV6XUTbJQBz0IfVup-zpFmtFjuk,92
+talks_reducer/__init__.py,sha256=Kzh1hXaw6Vq3DyTqrnJGOq8pn0P8lvaDcsg1bFUjFKk,208
+talks_reducer/__main__.py,sha256=azR_vh8HFPLaOnh-L6gUFWsL67I6iHtbeH5rQhsipGY,299
+talks_reducer/audio.py,sha256=sjHMeY0H9ESG-Gn5BX0wFRBX7sXjWwsgS8u9Vb0bJ88,4396
+talks_reducer/chunks.py,sha256=IpdZxRFPURSG5wP-OQ_p09CVP8wcKwIFysV29zOTSWI,2959
+talks_reducer/cli.py,sha256=6hHPuHzDh-BzZOLzHIAqEKU8VpD0cUnocpisLrOOe8I,16521
+talks_reducer/discovery.py,sha256=BJ-iMir65cJMs0u-_EYdknBQT_grvCZaJNOx1xGi2PU,4590
+talks_reducer/ffmpeg.py,sha256=dsHBOBcr5XCSg0q3xmzLOcibBiEdyrXdEQa-ze5vQsM,12551
+talks_reducer/gui.py,sha256=7oYfqsUKGaSCJqJlqWKdNpSZu_14jC8ehMryU6NDmVc,87872
+talks_reducer/models.py,sha256=6Q_8rmHLyImXp88D4B7ptTbFaH_xXa_yxs8A2dypz2Y,2004
+talks_reducer/pipeline.py,sha256=naAP8gli9MahoxVdla2tRn2vdDuBBU5ywWkYfZLkMcE,12211
+talks_reducer/progress.py,sha256=Mh43M6VWhjjUv9CI22xfD2EJ_7Aq3PCueqefQ9Bd5-o,4565
+talks_reducer/server.py,sha256=NZ_xTSSHIc6DY0-UBz7FsGbsA1L7cZifcDbn3RUVUo0,13743
+talks_reducer/server_tray.py,sha256=Wmnhu9OujbJKo2gV4anZAElqgD8ME08EaZymN8TLsnc,22923
+talks_reducer/service_client.py,sha256=JnhQhRxVwrGo9eUlduoZ6f_YYyXvjU7hF8UTNprH7TM,10984
+talks_reducer-0.6.0.dist-info/licenses/LICENSE,sha256=jN17mHNR3e84awmH3AbpWBcBDBzPxEH0rcOFoj1s7sQ,1124
+talks_reducer-0.6.0.dist-info/METADATA,sha256=6nQdOsM8Z33wuoplFcwrIDMLL0gRzwzLm5u4nVcOw84,8141
+talks_reducer-0.6.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+talks_reducer-0.6.0.dist-info/entry_points.txt,sha256=X2pjoh2vWBXXExVWorv1mbA1aTEVP3fyuZH4AixqZK4,208
+talks_reducer-0.6.0.dist-info/top_level.txt,sha256=pJWGcy__LR9JIEKH3QJyFmk9XrIsiFtqvuMNxFdIzDU,14
+talks_reducer-0.6.0.dist-info/RECORD,,
talks_reducer-0.5.3.dist-info/RECORD
DELETED
@@ -1,20 +0,0 @@
-talks_reducer/__about__.py,sha256=_Snp3i-vcwi7Uqp3CrEc_ulvuTOndH87ENERkNWGdsw,92
-talks_reducer/__init__.py,sha256=Kzh1hXaw6Vq3DyTqrnJGOq8pn0P8lvaDcsg1bFUjFKk,208
-talks_reducer/__main__.py,sha256=azR_vh8HFPLaOnh-L6gUFWsL67I6iHtbeH5rQhsipGY,299
-talks_reducer/audio.py,sha256=sjHMeY0H9ESG-Gn5BX0wFRBX7sXjWwsgS8u9Vb0bJ88,4396
-talks_reducer/chunks.py,sha256=IpdZxRFPURSG5wP-OQ_p09CVP8wcKwIFysV29zOTSWI,2959
-talks_reducer/cli.py,sha256=VOFZni7rl2CTCK3aLUO_iCV6PNva9ylIrmxxwNltQG4,8031
-talks_reducer/ffmpeg.py,sha256=dsHBOBcr5XCSg0q3xmzLOcibBiEdyrXdEQa-ze5vQsM,12551
-talks_reducer/gui.py,sha256=-zBvuPv9T4Z1mZ90QwRZEUUY65mUpWRKCgRZyV5aEPM,61121
-talks_reducer/models.py,sha256=6Q_8rmHLyImXp88D4B7ptTbFaH_xXa_yxs8A2dypz2Y,2004
-talks_reducer/pipeline.py,sha256=JnWa84sMwYncGv7crhGLZu0cW1Xx0eGQyFP-nLp-DHk,12222
-talks_reducer/progress.py,sha256=Mh43M6VWhjjUv9CI22xfD2EJ_7Aq3PCueqefQ9Bd5-o,4565
-talks_reducer/server.py,sha256=Czc1q6N7LJKsq-dBEum21zw31kA1uXBKC8T1-NtiscA,11235
-talks_reducer/server_tray.py,sha256=O8eiu31iWFvP2AROFG1OZ83n0aePtuSZqd7RE6NAqQw,13377
-talks_reducer/service_client.py,sha256=Hv3hKBUn2n7M5hLUVHamNphUdMt9bCXUrJaJMZHlE0Q,3088
-talks_reducer-0.5.3.dist-info/licenses/LICENSE,sha256=jN17mHNR3e84awmH3AbpWBcBDBzPxEH0rcOFoj1s7sQ,1124
-talks_reducer-0.5.3.dist-info/METADATA,sha256=bPkllANMRMjJrt5_fBMmwNxmB1UARiHVmmILsoPY3AY,6702
-talks_reducer-0.5.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-talks_reducer-0.5.3.dist-info/entry_points.txt,sha256=X2pjoh2vWBXXExVWorv1mbA1aTEVP3fyuZH4AixqZK4,208
-talks_reducer-0.5.3.dist-info/top_level.txt,sha256=pJWGcy__LR9JIEKH3QJyFmk9XrIsiFtqvuMNxFdIzDU,14
-talks_reducer-0.5.3.dist-info/RECORD,,
{talks_reducer-0.5.3.dist-info → talks_reducer-0.6.0.dist-info}/WHEEL
File without changes
{talks_reducer-0.5.3.dist-info → talks_reducer-0.6.0.dist-info}/entry_points.txt
File without changes
{talks_reducer-0.5.3.dist-info → talks_reducer-0.6.0.dist-info}/licenses/LICENSE
File without changes
{talks_reducer-0.5.3.dist-info → talks_reducer-0.6.0.dist-info}/top_level.txt
File without changes