talks-reducer 0.5.0-py3-none-any.whl → 0.5.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- talks_reducer/__about__.py +1 -1
- talks_reducer/cli.py +2 -2
- talks_reducer/ffmpeg.py +6 -0
- talks_reducer/gui.py +46 -4
- talks_reducer/models.py +27 -2
- talks_reducer/pipeline.py +1 -1
- talks_reducer/server.py +5 -3
- talks_reducer/server_tray.py +417 -0
- talks_reducer/service_client.py +102 -0
- {talks_reducer-0.5.0.dist-info → talks_reducer-0.5.2.dist-info}/METADATA +51 -2
- talks_reducer-0.5.2.dist-info/RECORD +20 -0
- {talks_reducer-0.5.0.dist-info → talks_reducer-0.5.2.dist-info}/entry_points.txt +1 -0
- talks_reducer-0.5.0.dist-info/RECORD +0 -18
- {talks_reducer-0.5.0.dist-info → talks_reducer-0.5.2.dist-info}/WHEEL +0 -0
- {talks_reducer-0.5.0.dist-info → talks_reducer-0.5.2.dist-info}/licenses/LICENSE +0 -0
- {talks_reducer-0.5.0.dist-info → talks_reducer-0.5.2.dist-info}/top_level.txt +0 -0
talks_reducer/__about__.py
CHANGED
talks_reducer/cli.py
CHANGED
@@ -18,7 +18,7 @@ try:
 except Exception: # pragma: no cover - fallback if metadata file missing
     _about_version = ""
 from .ffmpeg import FFmpegNotFoundError
-from .models import ProcessingOptions
+from .models import ProcessingOptions, default_temp_folder
 from .pipeline import speed_up_video
 from .progress import TqdmProgressReporter
 

@@ -55,7 +55,7 @@ def _build_parser() -> argparse.ArgumentParser:
     parser.add_argument(
         "--temp_folder",
         type=str,
-        default=
+        default=str(default_temp_folder()),
         help="The file path of the temporary working folder.",
     )
     parser.add_argument(
talks_reducer/ffmpeg.py
CHANGED
@@ -52,6 +52,9 @@ def find_ffmpeg() -> Optional[str]:
         "C:\\ProgramData\\chocolatey\\bin\\ffmpeg.exe",
         "C:\\Program Files\\ffmpeg\\bin\\ffmpeg.exe",
         "C:\\ffmpeg\\bin\\ffmpeg.exe",
+        "/usr/local/bin/ffmpeg",
+        "/opt/homebrew/bin/ffmpeg",
+        "/usr/bin/ffmpeg",
         "ffmpeg",
     ]
 

@@ -92,6 +95,9 @@ def find_ffprobe() -> Optional[str]:
         "C:\\ProgramData\\chocolatey\\bin\\ffprobe.exe",
         "C:\\Program Files\\ffmpeg\\bin\\ffprobe.exe",
         "C:\\ffmpeg\\bin\\ffprobe.exe",
+        "/usr/local/bin/ffprobe",
+        "/opt/homebrew/bin/ffprobe",
+        "/usr/bin/ffprobe",
         "ffprobe",
     ]
 
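The new entries above extend what was previously a Windows-only candidate list with the usual macOS (Homebrew) and Linux install locations. The hunk does not show how the list is consumed, so the following is only a minimal sketch of the typical resolution pattern, under the assumption that explicit paths are checked on disk and bare names fall back to a PATH lookup; `resolve_binary` and `CANDIDATES` are illustrative names, not part of the package.

```python
# Hedged sketch only: not the package's actual find_ffmpeg/find_ffprobe code,
# which this hunk does not show. Demonstrates how a candidate list like the
# one above is commonly probed.
import os
import shutil
from typing import List, Optional

CANDIDATES: List[str] = [
    "C:\\ProgramData\\chocolatey\\bin\\ffmpeg.exe",
    "C:\\Program Files\\ffmpeg\\bin\\ffmpeg.exe",
    "C:\\ffmpeg\\bin\\ffmpeg.exe",
    "/usr/local/bin/ffmpeg",     # Intel macOS Homebrew / manual installs
    "/opt/homebrew/bin/ffmpeg",  # Apple Silicon Homebrew prefix
    "/usr/bin/ffmpeg",           # Linux distribution packages
    "ffmpeg",                    # bare name: defer to PATH
]


def resolve_binary(candidates: List[str]) -> Optional[str]:
    """Return the first candidate that exists on disk or resolves via PATH."""
    for candidate in candidates:
        if os.path.sep in candidate and os.path.isfile(candidate):
            return candidate
        found = shutil.which(candidate)
        if found:
            return found
    return None


if __name__ == "__main__":
    print(resolve_binary(CANDIDATES))
```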
talks_reducer/gui.py
CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import argparse
 import json
 import os
 import re

@@ -10,7 +11,7 @@ import sys
 import threading
 from importlib.metadata import version
 from pathlib import Path
-from typing import TYPE_CHECKING, Callable, Iterable, List, Optional, Sequence
+from typing import TYPE_CHECKING, Any, Callable, Iterable, List, Optional, Sequence
 
 if TYPE_CHECKING:
     import tkinter as tk

@@ -20,7 +21,7 @@ try:
     from .cli import gather_input_files
     from .cli import main as cli_main
     from .ffmpeg import FFmpegNotFoundError
-    from .models import ProcessingOptions
+    from .models import ProcessingOptions, default_temp_folder
     from .pipeline import speed_up_video
     from .progress import ProgressHandle, SignalProgressReporter
 except ImportError: # pragma: no cover - handled at runtime

@@ -34,7 +35,7 @@ except ImportError: # pragma: no cover - handled at runtime
     from talks_reducer.cli import gather_input_files
     from talks_reducer.cli import main as cli_main
     from talks_reducer.ffmpeg import FFmpegNotFoundError
-    from talks_reducer.models import ProcessingOptions
+    from talks_reducer.models import ProcessingOptions, default_temp_folder
     from talks_reducer.pipeline import speed_up_video
     from talks_reducer.progress import ProgressHandle, SignalProgressReporter
 

@@ -149,6 +150,32 @@ DARK_THEME = {
 }
 
 
+_TRAY_LOCK = threading.Lock()
+_TRAY_PROCESS: Optional[subprocess.Popen[Any]] = None
+
+
+def _ensure_server_tray_running(extra_args: Optional[Sequence[str]] = None) -> None:
+    """Start the server tray in a background process if one is not active."""
+
+    global _TRAY_PROCESS
+
+    with _TRAY_LOCK:
+        if _TRAY_PROCESS is not None and _TRAY_PROCESS.poll() is None:
+            return
+
+        command = [sys.executable, "-m", "talks_reducer.server_tray"]
+        if extra_args:
+            command.extend(extra_args)
+
+        try:
+            _TRAY_PROCESS = subprocess.Popen(command)
+        except Exception as exc:  # pragma: no cover - best-effort fallback
+            _TRAY_PROCESS = None
+            sys.stderr.write(
+                f"Warning: failed to launch Talks Reducer server tray: {exc}\n"
+            )
+
+
 class _GuiProgressHandle(ProgressHandle):
     """Simple progress handle that records totals but only logs milestones."""
 

@@ -480,7 +507,7 @@ class TalksReducerGUI:
             self.advanced_frame, "Output file", self.output_var, row=0, browse=True
         )
 
-        self.temp_var = self.tk.StringVar(value=
+        self.temp_var = self.tk.StringVar(value=str(default_temp_folder()))
         self._add_entry(
             self.advanced_frame, "Temp folder", self.temp_var, row=1, browse=True
         )

@@ -1493,6 +1520,17 @@ def main(argv: Optional[Sequence[str]] = None) -> bool:
     if argv is None:
         argv = sys.argv[1:]
 
+    parser = argparse.ArgumentParser(add_help=False)
+    parser.add_argument(
+        "--no-tray",
+        action="store_true",
+        help="Do not start the Talks Reducer server tray alongside the GUI.",
+    )
+
+    parsed_args, remaining = parser.parse_known_args(argv)
+    no_tray = parsed_args.no_tray
+    argv = remaining
+
     if argv:
         launch_gui = False
         if sys.platform == "win32" and not any(arg.startswith("-") for arg in argv):

@@ -1506,6 +1544,8 @@ def main(argv: Optional[Sequence[str]] = None) -> bool:
     if launch_gui:
         try:
             app = TalksReducerGUI(argv, auto_run=True)
+            if not no_tray:
+                _ensure_server_tray_running()
             app.run()
             return True
         except Exception:

@@ -1567,6 +1607,8 @@ def main(argv: Optional[Sequence[str]] = None) -> bool:
     # Catch and report any errors during GUI initialization
     try:
         app = TalksReducerGUI()
+        if not no_tray:
+            _ensure_server_tray_running()
         app.run()
         return True
     except Exception as e:
talks_reducer/models.py
CHANGED
@@ -2,11 +2,36 @@
 
 from __future__ import annotations
 
-
+import os
+import sys
+import tempfile
+from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Optional
 
 
+def default_temp_folder() -> Path:
+    """Return an OS-appropriate default temporary workspace directory."""
+
+    if sys.platform == "darwin":
+        base = Path.home() / "Library" / "Application Support" / "talks-reducer"
+    elif sys.platform == "win32":
+        appdata = os.environ.get("LOCALAPPDATA") or os.environ.get("APPDATA")
+        base = (
+            Path(appdata)
+            if appdata
+            else Path.home() / "AppData" / "Local" / "talks-reducer"
+        )
+    else:
+        xdg_runtime = os.environ.get("XDG_RUNTIME_DIR")
+        if xdg_runtime:
+            base = Path(xdg_runtime) / "talks-reducer"
+        else:
+            base = Path(tempfile.gettempdir()) / "talks-reducer"
+
+    return base / "temp"
+
+
 @dataclass(frozen=True)
 class ProcessingOptions:
     """Configuration values controlling how the talks reducer processes media.

@@ -24,7 +49,7 @@ class ProcessingOptions:
     sounded_speed: float = 1.0
     frame_spreadage: int = 2
     audio_fade_envelope_size: int = 400
-    temp_folder: Path =
+    temp_folder: Path = field(default_factory=default_temp_folder)
     small: bool = False
 
 
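For reference, the branches above resolve to the following locations; the exact values depend on `HOME`, `LOCALAPPDATA`/`APPDATA`, and `XDG_RUNTIME_DIR`, and note that when `LOCALAPPDATA` is set the Windows branch uses it directly, without a `talks-reducer` sub-folder. A quick check, assuming the 0.5.2 wheel is installed:

```python
# Prints the resolved default workspace for the current platform.
from talks_reducer.models import default_temp_folder

print(default_temp_folder())
# macOS:                      ~/Library/Application Support/talks-reducer/temp
# Windows (LOCALAPPDATA set): %LOCALAPPDATA%\temp
# Windows (fallback):         ~\AppData\Local\talks-reducer\temp
# Linux (XDG_RUNTIME_DIR):    $XDG_RUNTIME_DIR/talks-reducer/temp
# Linux (fallback):           /tmp/talks-reducer/temp
```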
talks_reducer/pipeline.py
CHANGED
@@ -46,7 +46,7 @@ def _input_to_output_filename(filename: Path, small: bool = False) -> Path:
 
 def _create_path(path: Path) -> None:
     try:
-        path.mkdir()
+        path.mkdir(parents=True, exist_ok=True)
     except OSError as exc: # pragma: no cover - defensive logging
         raise AssertionError(
             "Creation of the directory failed. (The TEMP folder may already exist. Delete or rename it, and try again.)"
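This one-line change pairs with the new nested default from `models.default_temp_folder()`: `Path.mkdir()` with no arguments fails when parent directories are missing or the folder already exists, whereas the new call is idempotent. A standalone illustration using a scratch directory rather than the real temp folder:

```python
# Demonstrates the behavioural difference behind the mkdir change.
import tempfile
from pathlib import Path

scratch = Path(tempfile.mkdtemp()) / "talks-reducer" / "temp"

scratch.mkdir(parents=True, exist_ok=True)  # creates the whole chain
scratch.mkdir(parents=True, exist_ok=True)  # re-running is a no-op, not FileExistsError

try:
    (scratch.parent / "other" / "deep").mkdir()  # old-style call with a missing parent
except FileNotFoundError as exc:
    print(f"plain mkdir() fails on nested paths: {exc}")
```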
talks_reducer/server.py
CHANGED
@@ -273,8 +273,9 @@ def build_interface() -> gr.Blocks:
         gr.Markdown(
             """
             ## Talks Reducer — Simple Server
-            Drop a video into the zone below or click to browse.
-            apply the 720p/128k preset before processing starts
+            Drop a video into the zone below or click to browse. **Small video** is enabled
+            by default to apply the 720p/128k preset before processing starts—clear it to
+            keep the original resolution.
             """.strip()
         )
 

@@ -284,7 +285,7 @@ def build_interface() -> gr.Blocks:
             file_types=["video"],
             type="filepath",
         )
-        small_checkbox = gr.Checkbox(label="Small video", value=
+        small_checkbox = gr.Checkbox(label="Small video", value=True)
 
         video_output = gr.Video(label="Processed video")
         summary_output = gr.Markdown()

@@ -296,6 +297,7 @@ def build_interface() -> gr.Blocks:
             inputs=[file_input, small_checkbox],
             outputs=[video_output, log_output, summary_output, download_output],
             queue=True,
+            api_name="process_video",
         )
 
         demo.queue(default_concurrency_limit=1)
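Naming the click handler with `api_name="process_video"` gives the endpoint a stable address for remote callers; the new `service_client.py` below relies on it. A minimal client-side call, assuming a server is already listening on the default port and that `input.mp4` is a placeholder file:

```python
# Minimal remote call against the newly named endpoint (mirrors service_client.py).
from gradio_client import Client, file

client = Client("http://127.0.0.1:9005/")
video, log_text, summary, download_path = client.predict(
    file("input.mp4"),   # the drop-zone file input
    True,                # the "Small video" checkbox
    api_name="/process_video",
)
print(summary)
print("processed file downloaded to:", download_path)
```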
talks_reducer/server_tray.py
ADDED
@@ -0,0 +1,417 @@
+"""System tray launcher for the Talks Reducer Gradio server."""
+
+from __future__ import annotations
+
+import argparse
+import atexit
+import logging
+import subprocess
+import sys
+import threading
+import time
+import webbrowser
+from contextlib import suppress
+from importlib import resources
+from pathlib import Path
+from typing import Any, Optional, Sequence
+
+from PIL import Image
+
+from .server import build_interface
+
+try:  # pragma: no cover - import guarded for clearer error message at runtime
+    import pystray
+except ModuleNotFoundError as exc:  # pragma: no cover - handled in ``main``
+    PYSTRAY_IMPORT_ERROR = exc
+    pystray = None  # type: ignore[assignment]
+else:
+    PYSTRAY_IMPORT_ERROR = None
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+def _guess_local_url(host: Optional[str], port: int) -> str:
+    """Return the URL the server is most likely reachable at locally."""
+
+    if host in (None, "", "0.0.0.0", "::"):
+        hostname = "127.0.0.1"
+    else:
+        hostname = host
+    return f"http://{hostname}:{port}/"
+
+
+def _load_icon() -> Image.Image:
+    """Load the tray icon image, falling back to a solid accent square."""
+
+    LOGGER.debug("Attempting to load tray icon image.")
+
+    candidates = [
+        Path(__file__).resolve().parent.parent / "docs" / "assets" / "icon.png",
+        Path(__file__).resolve().parent / "icon.png",
+    ]
+
+    for candidate in candidates:
+        LOGGER.debug("Checking icon candidate at %s", candidate)
+        if candidate.exists():
+            try:
+                image = Image.open(candidate).copy()
+            except Exception as exc:  # pragma: no cover - diagnostic log
+                LOGGER.warning("Failed to load tray icon from %s: %s", candidate, exc)
+            else:
+                LOGGER.debug("Loaded tray icon from %s", candidate)
+                return image
+
+    with suppress(FileNotFoundError):
+        resource_icon = resources.files("talks_reducer") / "assets" / "icon.png"
+        if resource_icon.is_file():
+            LOGGER.debug("Loading tray icon from package resources")
+            with resource_icon.open("rb") as handle:
+                try:
+                    return Image.open(handle).copy()
+                except Exception as exc:  # pragma: no cover - diagnostic log
+                    LOGGER.warning(
+                        "Failed to load tray icon from package resources: %s", exc
+                    )
+
+    LOGGER.warning("Falling back to generated tray icon; packaged image not found")
+    # Fallback to a simple accent-colored square to avoid import errors
+    image = Image.new("RGBA", (64, 64), color=(37, 99, 235, 255))
+    return image
+
+
+class _ServerTrayApplication:
+    """Coordinate the Gradio server lifecycle and the system tray icon."""
+
+    def __init__(
+        self,
+        *,
+        host: Optional[str],
+        port: int,
+        share: bool,
+        open_browser: bool,
+        tray_mode: str,
+    ) -> None:
+        self._host = host
+        self._port = port
+        self._share = share
+        self._open_browser_on_start = open_browser
+        self._tray_mode = tray_mode
+
+        self._stop_event = threading.Event()
+        self._ready_event = threading.Event()
+        self._gui_lock = threading.Lock()
+
+        self._server_handle: Optional[Any] = None
+        self._local_url: Optional[str] = None
+        self._share_url: Optional[str] = None
+        self._icon: Optional[pystray.Icon] = None
+        self._gui_process: Optional[subprocess.Popen[Any]] = None
+
+    # Server lifecycle -------------------------------------------------
+
+    def _launch_server(self) -> None:
+        """Start the Gradio server in the background and record its URLs."""
+
+        LOGGER.info(
+            "Starting Talks Reducer server on host=%s port=%s share=%s",
+            self._host or "127.0.0.1",
+            self._port,
+            self._share,
+        )
+        demo = build_interface()
+        server = demo.launch(
+            server_name=self._host,
+            server_port=self._port,
+            share=self._share,
+            inbrowser=False,
+            prevent_thread_lock=True,
+            show_error=True,
+        )
+
+        self._server_handle = server
+        self._local_url = getattr(
+            server, "local_url", _guess_local_url(self._host, self._port)
+        )
+        self._share_url = getattr(server, "share_url", None)
+        self._ready_event.set()
+        LOGGER.info("Server ready at %s", self._local_url)
+
+        # Keep checking for a share URL while the server is running.
+        while not self._stop_event.is_set():
+            share_url = getattr(server, "share_url", None)
+            if share_url:
+                self._share_url = share_url
+                LOGGER.info("Share URL available: %s", share_url)
+            time.sleep(0.5)
+
+    # Tray helpers -----------------------------------------------------
+
+    def _resolve_url(self) -> Optional[str]:
+        if self._share_url:
+            return self._share_url
+        return self._local_url
+
+    def _handle_open_webui(
+        self,
+        _icon: Optional[pystray.Icon] = None,
+        _item: Optional[pystray.MenuItem] = None,
+    ) -> None:
+        url = self._resolve_url()
+        if url:
+            webbrowser.open(url)
+            LOGGER.debug("Opened browser to %s", url)
+        else:
+            LOGGER.warning("Server URL not yet available; please try again.")
+
+    def _gui_is_running(self) -> bool:
+        """Return whether the GUI subprocess is currently active."""
+
+        process = self._gui_process
+        if process is None:
+            return False
+        if process.poll() is None:
+            return True
+        self._gui_process = None
+        return False
+
+    def _monitor_gui_process(self, process: subprocess.Popen[Any]) -> None:
+        """Reset the GUI handle once the subprocess exits."""
+
+        try:
+            process.wait()
+        except Exception as exc:  # pragma: no cover - best-effort cleanup
+            LOGGER.debug("GUI process monitor exited with %s", exc)
+        finally:
+            with self._gui_lock:
+                if self._gui_process is process:
+                    self._gui_process = None
+            LOGGER.info("Talks Reducer GUI closed")
+
+    def _launch_gui(
+        self,
+        _icon: Optional[pystray.Icon] = None,
+        _item: Optional[pystray.MenuItem] = None,
+    ) -> None:
+        """Launch the Talks Reducer GUI in a background subprocess."""
+
+        with self._gui_lock:
+            if self._gui_is_running():
+                LOGGER.info(
+                    "Talks Reducer GUI already running; focusing existing window"
+                )
+                return
+
+            try:
+                LOGGER.info("Launching Talks Reducer GUI via %s", sys.executable)
+                process = subprocess.Popen(
+                    [sys.executable, "-m", "talks_reducer.gui", "--no-tray"]
+                )
+            except Exception as exc:  # pragma: no cover - platform specific
+                LOGGER.error("Failed to launch Talks Reducer GUI: %s", exc)
+                self._gui_process = None
+                return
+
+            self._gui_process = process
+
+        watcher = threading.Thread(
+            target=self._monitor_gui_process,
+            args=(process,),
+            name="talks-reducer-gui-monitor",
+            daemon=True,
+        )
+        watcher.start()
+
+    def _handle_quit(
+        self,
+        icon: Optional[pystray.Icon] = None,
+        _item: Optional[pystray.MenuItem] = None,
+    ) -> None:
+        self.stop()
+        if icon is not None:
+            icon.stop()
+
+    # Public API -------------------------------------------------------
+
+    def run(self) -> None:
+        """Start the server and block until the tray icon exits."""
+
+        server_thread = threading.Thread(
+            target=self._launch_server, name="talks-reducer-server", daemon=True
+        )
+        server_thread.start()
+
+        if not self._ready_event.wait(timeout=30):
+            raise RuntimeError(
+                "Timed out while waiting for the Talks Reducer server to start."
+            )
+
+        if self._open_browser_on_start:
+            self._handle_open_webui()
+
+        if self._tray_mode == "headless":
+            LOGGER.warning(
+                "Tray icon disabled (tray_mode=headless); press Ctrl+C to stop the server."
+            )
+            try:
+                while not self._stop_event.wait(0.5):
+                    pass
+            finally:
+                self.stop()
+            return
+
+        icon_image = _load_icon()
+        menu = pystray.Menu(
+            pystray.MenuItem(
+                "Open GUI",
+                self._launch_gui,
+                default=True,
+            ),
+            pystray.MenuItem("Open WebUI", self._handle_open_webui),
+            pystray.MenuItem("Quit", self._handle_quit),
+        )
+        self._icon = pystray.Icon(
+            "talks-reducer", icon_image, "Talks Reducer Server", menu=menu
+        )
+
+        if self._tray_mode == "pystray-detached":
+            LOGGER.info("Running tray icon in detached mode")
+            self._icon.run_detached()
+            try:
+                while not self._stop_event.wait(0.5):
+                    pass
+            finally:
+                self.stop()
+            return
+
+        LOGGER.info("Running tray icon in blocking mode")
+        self._icon.run()
+
+    def stop(self) -> None:
+        """Stop the tray icon and shut down the Gradio server."""
+
+        self._stop_event.set()
+
+        if self._icon is not None:
+            with suppress(Exception):
+                self._icon.visible = False
+            with suppress(Exception):
+                self._icon.stop()
+
+        self._stop_gui()
+
+        if self._server_handle is not None:
+            with suppress(Exception):
+                self._server_handle.close()
+            LOGGER.info("Shut down Talks Reducer server")
+
+    def _stop_gui(self) -> None:
+        """Terminate the GUI subprocess if it is still running."""
+
+        with self._gui_lock:
+            process = self._gui_process
+            if process is None:
+                return
+
+            if process.poll() is None:
+                LOGGER.info("Stopping Talks Reducer GUI")
+                try:
+                    process.terminate()
+                    process.wait(timeout=5)
+                except subprocess.TimeoutExpired:
+                    LOGGER.warning(
+                        "GUI process did not exit cleanly; forcing termination"
+                    )
+                    process.kill()
+                    process.wait(timeout=5)
+                except Exception as exc:  # pragma: no cover - defensive cleanup
+                    LOGGER.debug("Error while terminating GUI process: %s", exc)
+
+            self._gui_process = None
+
+
+def main(argv: Optional[Sequence[str]] = None) -> None:
+    """Launch the Gradio server with a companion system tray icon."""
+
+    parser = argparse.ArgumentParser(
+        description="Launch the Talks Reducer server with a system tray icon."
+    )
+    parser.add_argument(
+        "--host", dest="host", default=None, help="Custom host to bind."
+    )
+    parser.add_argument(
+        "--port",
+        dest="port",
+        type=int,
+        default=9005,
+        help="Port number for the web server (default: 9005).",
+    )
+    parser.add_argument(
+        "--share",
+        action="store_true",
+        help="Create a temporary public Gradio link.",
+    )
+    browser_group = parser.add_mutually_exclusive_group()
+    browser_group.add_argument(
+        "--open-browser",
+        dest="open_browser",
+        action="store_true",
+        help="Automatically open the web interface after startup.",
+    )
+    browser_group.add_argument(
+        "--no-browser",
+        dest="open_browser",
+        action="store_false",
+        help="Do not open the web interface automatically (default).",
+    )
+    parser.set_defaults(open_browser=False)
+    parser.add_argument(
+        "--tray-mode",
+        choices=("pystray", "pystray-detached", "headless"),
+        default="pystray",
+        help=(
+            "Select how the tray runs: foreground pystray (default), detached "
+            "pystray worker, or disable the tray entirely."
+        ),
+    )
+    parser.add_argument(
+        "--debug",
+        action="store_true",
+        help="Enable verbose logging for troubleshooting.",
+    )
+
+    args = parser.parse_args(argv)
+
+    logging.basicConfig(
+        level=logging.DEBUG if args.debug else logging.INFO,
+        format="%(asctime)s [%(levelname)s] %(message)s",
+        datefmt="%H:%M:%S",
+    )
+
+    if args.tray_mode != "headless" and PYSTRAY_IMPORT_ERROR is not None:
+        raise RuntimeError(
+            "System tray mode requires the 'pystray' dependency. Install it with "
+            "`pip install pystray` or `pip install talks-reducer[dev]` and try again."
+        ) from PYSTRAY_IMPORT_ERROR
+
+    app = _ServerTrayApplication(
+        host=args.host,
+        port=args.port,
+        share=args.share,
+        open_browser=args.open_browser,
+        tray_mode=args.tray_mode,
+    )
+
+    atexit.register(app.stop)
+
+    try:
+        app.run()
+    except KeyboardInterrupt:  # pragma: no cover - interactive convenience
+        app.stop()
+
+
+__all__ = ["main"]
+
+
+if __name__ == "__main__":  # pragma: no cover - convenience entry point
+    main()
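The module is wired up both as `python -m talks_reducer.server_tray` and, per the METADATA changes below, as a `talks-reducer server-tray` console command. Because `main()` accepts an explicit `argv`, it can also be driven from Python; the flags below are the ones defined in the parser above.

```python
# Starts the tray-backed server; blocks until Quit is chosen or Ctrl+C is pressed.
from talks_reducer import server_tray

# Equivalent to: talks-reducer server-tray --port 9005 --open-browser --debug
server_tray.main(["--port", "9005", "--open-browser", "--debug"])

# On machines without a usable tray backend, keep only the web server:
# server_tray.main(["--tray-mode", "headless"])
```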
talks_reducer/service_client.py
ADDED
@@ -0,0 +1,102 @@
+"""Command-line helper for sending videos to the Talks Reducer server."""
+
+from __future__ import annotations
+
+import argparse
+import shutil
+from pathlib import Path
+from typing import Optional, Sequence, Tuple
+
+from gradio_client import Client
+from gradio_client import file as gradio_file
+
+
+def send_video(
+    input_path: Path,
+    output_path: Optional[Path],
+    server_url: str,
+    small: bool = False,
+) -> Tuple[Path, str, str]:
+    """Upload *input_path* to the Gradio server and download the processed video."""
+
+    if not input_path.exists():
+        raise FileNotFoundError(f"Input file does not exist: {input_path}")
+
+    client = Client(server_url)
+    prediction = client.predict(
+        gradio_file(str(input_path)),
+        bool(small),
+        api_name="/process_video",
+    )
+
+    try:
+        _, log_text, summary, download_path = prediction
+    except (TypeError, ValueError) as exc:  # pragma: no cover - defensive
+        raise RuntimeError("Unexpected response from server") from exc
+
+    if not download_path:
+        raise RuntimeError("Server did not return a processed file")
+
+    download_source = Path(str(download_path))
+    if output_path is None:
+        destination = Path.cwd() / download_source.name
+    else:
+        destination = output_path
+        if destination.is_dir():
+            destination = destination / download_source.name
+
+    destination.parent.mkdir(parents=True, exist_ok=True)
+    if download_source.resolve() != destination.resolve():
+        shutil.copy2(download_source, destination)
+
+    return destination, summary, log_text
+
+
+def _build_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        description="Send a video to a running talks-reducer server and download the result.",
+    )
+    parser.add_argument("input", type=Path, help="Path to the video file to upload.")
+    parser.add_argument(
+        "--server",
+        default="http://127.0.0.1:9005/",
+        help="Base URL for the talks-reducer server (default: http://127.0.0.1:9005/).",
+    )
+    parser.add_argument(
+        "--output",
+        type=Path,
+        default=None,
+        help="Where to store the processed video. Defaults to the working directory.",
+    )
+    parser.add_argument(
+        "--small",
+        action="store_true",
+        help="Toggle the 'Small video' preset before processing.",
+    )
+    parser.add_argument(
+        "--print-log",
+        action="store_true",
+        help="Print the server log after processing completes.",
+    )
+    return parser
+
+
+def main(argv: Optional[Sequence[str]] = None) -> None:
+    parser = _build_parser()
+    args = parser.parse_args(argv)
+
+    destination, summary, log_text = send_video(
+        input_path=args.input.expanduser(),
+        output_path=args.output.expanduser() if args.output else None,
+        server_url=args.server,
+        small=args.small,
+    )
+
+    print(summary)
+    print(f"Saved processed video to {destination}")
+    if args.print_log:
+        print("\nServer log:\n" + log_text)
+
+
+if __name__ == "__main__":  # pragma: no cover
+    main()
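Besides the CLI entry point, `send_video` can be called directly. A short programmatic sketch, assuming a server started with `talks-reducer server` (or the tray) is reachable at the default address; `talk.mp4` and the output directory are placeholders.

```python
# Uploads a file to a running server and copies the processed result locally.
from pathlib import Path

from talks_reducer.service_client import send_video

destination, summary, log_text = send_video(
    input_path=Path("talk.mp4"),
    output_path=Path("output"),        # an existing directory: the server's filename is kept
    server_url="http://127.0.0.1:9005/",
    small=True,                        # same effect as the Small video checkbox
)
print(summary)
print("saved to", destination)
```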
{talks_reducer-0.5.0.dist-info → talks_reducer-0.5.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: talks-reducer
-Version: 0.5.0
+Version: 0.5.2
 Summary: CLI for speeding up long-form talks by removing silence
 Author: Talks Reducer Maintainers
 License-Expression: MIT

@@ -13,6 +13,7 @@ Requires-Dist: numpy>=1.22.0
 Requires-Dist: tqdm>=4.65.0
 Requires-Dist: tkinterdnd2>=0.3.0
 Requires-Dist: Pillow>=9.0.0
+Requires-Dist: pystray>=0.19.5
 Requires-Dist: imageio-ffmpeg>=0.4.8
 Requires-Dist: gradio>=4.0.0
 Provides-Extra: dev

@@ -44,7 +45,9 @@ See [CHANGELOG.md](CHANGELOG.md).
 Go to the [releases page](https://github.com/popstas/talks-reducer/releases) and download the appropriate artifact:
 
 - **Windows** — `talks-reducer-windows-0.4.0.zip`
-- **macOS** — `talks-reducer.app.zip`
+- **macOS** — `talks-reducer.app.zip`
+
+> **Troubleshooting:** If launching the bundle (or running `python talks_reducer/gui.py`) prints `macOS 26 (2600) or later required, have instead 16 (1600)!`, make sure you're using a Python build that ships a modern Tk. The stock [python.org 3.13.5 installer](https://www.python.org/downloads/release/python-3135/) includes Tk 8.6 and has been verified to work.
 
 When extracted on Windows the bundled `talks-reducer.exe` behaves like the
 `python talks_reducer/gui.py` entry point: double-clicking it launches the GUI

@@ -82,10 +85,56 @@ Prefer a lightweight browser interface? Launch the Gradio-powered simple mode wi
 talks-reducer server
 ```
 
+Want the server to live in your system tray instead of a terminal window? Use:
+
+```sh
+talks-reducer server-tray
+```
+
+Pass `--debug` to print verbose logs about the tray icon lifecycle, and
+`--tray-mode pystray-detached` to try pystray's alternate detached runner. If
+the icon backend refuses to appear, fall back to `--tray-mode headless` to keep
+the web server running without a tray process. The tray menu includes an **Open GUI**
+item (also triggered by double-clicking the icon) that launches the desktop
+Talks Reducer interface alongside an **Open WebUI** entry that opens the Gradio
+page in your browser. Close the GUI window to return to the tray without
+stopping the server. Launching the GUI directly now starts the tray-backed
+server in the background before the window appears so the icon stays available
+after you close it; add `--no-tray` when running `python -m talks_reducer.gui`
+if you prefer to skip the background server entirely. The tray command itself
+never launches the GUI automatically, so use the menu item (or rerun the GUI
+with `--no-tray`) whenever you want to reopen it. The tray no longer opens a
+browser automatically—pass `--open-browser` if you prefer the web page to
+launch as soon as the server is ready.
+
 This opens a local web page featuring a drag-and-drop upload zone, a **Small video** checkbox that mirrors the CLI preset, a live
 progress indicator, and automatic previews of the processed output. Once the job completes you can inspect the resulting compression
 ratio and download the rendered video directly from the page.
 
+### Uploading and retrieving a processed video
+
+1. Open the printed `http://localhost:<port>` address (the default port is `9005`).
+2. Drag a video onto the **Video file** drop zone or click to browse and select one from disk.
+3. **Small video** starts enabled to apply the 720p/128 kbps preset. Clear the box before the upload finishes if you want to keep the original resolution and bitrate.
+4. Wait for the progress bar and log to report completion—the interface queues work automatically after the file arrives.
+5. Watch the processed preview in the **Processed video** player and click **Download processed file** to save the result locally.
+
+Need to change where the server listens? Run `talks-reducer server --host 0.0.0.0 --port 7860` (or any other port) to bind to a
+different address.
+
+### Automating uploads from the command line
+
+Prefer to script uploads instead of using the browser UI? Start the server and use the bundled helper to submit a job and save
+the processed video locally:
+
+```sh
+python -m talks_reducer.service_client --server http://127.0.0.1:9005/ --input demo.mp4 --output output/demo_processed.mp4
+```
+
+The helper wraps the Gradio API exposed by `server.py`, waits for processing to complete, then copies the rendered file to the
+path you provide. Pass `--small` to mirror the **Small video** checkbox or `--print-log` to stream the server log after the
+download finishes.
+
 ## Contributing
 See `CONTRIBUTION.md` for development setup details and guidance on sharing improvements.
 
talks_reducer-0.5.2.dist-info/RECORD
ADDED
@@ -0,0 +1,20 @@
+talks_reducer/__about__.py,sha256=IrybQ0W3057SlpgRUduOAhBgwbyN7X0mC3g7VQj2SxA,92
+talks_reducer/__init__.py,sha256=Kzh1hXaw6Vq3DyTqrnJGOq8pn0P8lvaDcsg1bFUjFKk,208
+talks_reducer/__main__.py,sha256=azR_vh8HFPLaOnh-L6gUFWsL67I6iHtbeH5rQhsipGY,299
+talks_reducer/audio.py,sha256=sjHMeY0H9ESG-Gn5BX0wFRBX7sXjWwsgS8u9Vb0bJ88,4396
+talks_reducer/chunks.py,sha256=IpdZxRFPURSG5wP-OQ_p09CVP8wcKwIFysV29zOTSWI,2959
+talks_reducer/cli.py,sha256=VOFZni7rl2CTCK3aLUO_iCV6PNva9ylIrmxxwNltQG4,8031
+talks_reducer/ffmpeg.py,sha256=dsHBOBcr5XCSg0q3xmzLOcibBiEdyrXdEQa-ze5vQsM,12551
+talks_reducer/gui.py,sha256=-zBvuPv9T4Z1mZ90QwRZEUUY65mUpWRKCgRZyV5aEPM,61121
+talks_reducer/models.py,sha256=6Q_8rmHLyImXp88D4B7ptTbFaH_xXa_yxs8A2dypz2Y,2004
+talks_reducer/pipeline.py,sha256=JnWa84sMwYncGv7crhGLZu0cW1Xx0eGQyFP-nLp-DHk,12222
+talks_reducer/progress.py,sha256=Mh43M6VWhjjUv9CI22xfD2EJ_7Aq3PCueqefQ9Bd5-o,4565
+talks_reducer/server.py,sha256=Czc1q6N7LJKsq-dBEum21zw31kA1uXBKC8T1-NtiscA,11235
+talks_reducer/server_tray.py,sha256=O8eiu31iWFvP2AROFG1OZ83n0aePtuSZqd7RE6NAqQw,13377
+talks_reducer/service_client.py,sha256=Hv3hKBUn2n7M5hLUVHamNphUdMt9bCXUrJaJMZHlE0Q,3088
+talks_reducer-0.5.2.dist-info/licenses/LICENSE,sha256=jN17mHNR3e84awmH3AbpWBcBDBzPxEH0rcOFoj1s7sQ,1124
+talks_reducer-0.5.2.dist-info/METADATA,sha256=shQxqR1Z6PayVws-5cZ8FAhTeR_1jo7pfpEPFDloN4o,6702
+talks_reducer-0.5.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+talks_reducer-0.5.2.dist-info/entry_points.txt,sha256=X2pjoh2vWBXXExVWorv1mbA1aTEVP3fyuZH4AixqZK4,208
+talks_reducer-0.5.2.dist-info/top_level.txt,sha256=pJWGcy__LR9JIEKH3QJyFmk9XrIsiFtqvuMNxFdIzDU,14
+talks_reducer-0.5.2.dist-info/RECORD,,
talks_reducer-0.5.0.dist-info/RECORD
REMOVED
@@ -1,18 +0,0 @@
-talks_reducer/__about__.py,sha256=RhSau8kONixzPMrozb3d8HJbGwsQmAY3l5_HbL3elh4,92
-talks_reducer/__init__.py,sha256=Kzh1hXaw6Vq3DyTqrnJGOq8pn0P8lvaDcsg1bFUjFKk,208
-talks_reducer/__main__.py,sha256=azR_vh8HFPLaOnh-L6gUFWsL67I6iHtbeH5rQhsipGY,299
-talks_reducer/audio.py,sha256=sjHMeY0H9ESG-Gn5BX0wFRBX7sXjWwsgS8u9Vb0bJ88,4396
-talks_reducer/chunks.py,sha256=IpdZxRFPURSG5wP-OQ_p09CVP8wcKwIFysV29zOTSWI,2959
-talks_reducer/cli.py,sha256=9Lj47GTtvr1feYBrNtQ2aB3r4sLquLODMZk_K_YAIEk,7990
-talks_reducer/ffmpeg.py,sha256=Joqtkq-bktP-Hq3j3I394FYB_VQ-7GyF0n7bqTiknrg,12356
-talks_reducer/gui.py,sha256=6OTUfIMH30XBOFq-BYZmxnODp0HhW3oj7CcTqLdpKyI,59739
-talks_reducer/models.py,sha256=Ax7OIV7WECRROi5km-Se0Z1LQsLxd5J7GnGXDbWrNjg,1197
-talks_reducer/pipeline.py,sha256=kemU_Txoh38jLzJCjy0HvjUS1gtvmVItnxXhlZcdw5Y,12195
-talks_reducer/progress.py,sha256=Mh43M6VWhjjUv9CI22xfD2EJ_7Aq3PCueqefQ9Bd5-o,4565
-talks_reducer/server.py,sha256=r5P7fGfU9SGxwPYDaSsSnEllzwjlombOJ-FF8B5iAZQ,11128
-talks_reducer-0.5.0.dist-info/licenses/LICENSE,sha256=jN17mHNR3e84awmH3AbpWBcBDBzPxEH0rcOFoj1s7sQ,1124
-talks_reducer-0.5.0.dist-info/METADATA,sha256=Rrw6kiDxbQT7q6I0QnbWcNxsJsMLLe0z145f4Zr9kBM,3636
-talks_reducer-0.5.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-talks_reducer-0.5.0.dist-info/entry_points.txt,sha256=no-NVP5Z9LrzaJL4-2ltKe9IkLZo8dQ32zilIb1gbZE,149
-talks_reducer-0.5.0.dist-info/top_level.txt,sha256=pJWGcy__LR9JIEKH3QJyFmk9XrIsiFtqvuMNxFdIzDU,14
-talks_reducer-0.5.0.dist-info/RECORD,,
{talks_reducer-0.5.0.dist-info → talks_reducer-0.5.2.dist-info}/WHEEL
File without changes
{talks_reducer-0.5.0.dist-info → talks_reducer-0.5.2.dist-info}/licenses/LICENSE
File without changes
{talks_reducer-0.5.0.dist-info → talks_reducer-0.5.2.dist-info}/top_level.txt
File without changes