voice-mode 2.33.3-py3-none-any.whl → 2.34.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- voice_mode/__version__.py +1 -1
- voice_mode/frontend/.next/BUILD_ID +1 -1
- voice_mode/frontend/.next/app-build-manifest.json +5 -5
- voice_mode/frontend/.next/build-manifest.json +3 -3
- voice_mode/frontend/.next/next-minimal-server.js.nft.json +1 -1
- voice_mode/frontend/.next/next-server.js.nft.json +1 -1
- voice_mode/frontend/.next/prerender-manifest.json +1 -1
- voice_mode/frontend/.next/required-server-files.json +1 -1
- voice_mode/frontend/.next/server/app/_not-found/page.js +1 -1
- voice_mode/frontend/.next/server/app/_not-found/page_client-reference-manifest.js +1 -1
- voice_mode/frontend/.next/server/app/_not-found.html +1 -1
- voice_mode/frontend/.next/server/app/_not-found.rsc +1 -1
- voice_mode/frontend/.next/server/app/api/connection-details/route.js +2 -2
- voice_mode/frontend/.next/server/app/favicon.ico/route.js +2 -2
- voice_mode/frontend/.next/server/app/index.html +1 -1
- voice_mode/frontend/.next/server/app/index.rsc +2 -2
- voice_mode/frontend/.next/server/app/page.js +2 -2
- voice_mode/frontend/.next/server/app/page_client-reference-manifest.js +1 -1
- voice_mode/frontend/.next/server/chunks/994.js +2 -2
- voice_mode/frontend/.next/server/middleware-build-manifest.js +1 -1
- voice_mode/frontend/.next/server/next-font-manifest.js +1 -1
- voice_mode/frontend/.next/server/next-font-manifest.json +1 -1
- voice_mode/frontend/.next/server/pages/404.html +1 -1
- voice_mode/frontend/.next/server/pages/500.html +1 -1
- voice_mode/frontend/.next/server/server-reference-manifest.json +1 -1
- voice_mode/frontend/.next/standalone/.next/BUILD_ID +1 -1
- voice_mode/frontend/.next/standalone/.next/app-build-manifest.json +5 -5
- voice_mode/frontend/.next/standalone/.next/build-manifest.json +3 -3
- voice_mode/frontend/.next/standalone/.next/prerender-manifest.json +1 -1
- voice_mode/frontend/.next/standalone/.next/required-server-files.json +1 -1
- voice_mode/frontend/.next/standalone/.next/server/app/_not-found/page.js +1 -1
- voice_mode/frontend/.next/standalone/.next/server/app/_not-found/page_client-reference-manifest.js +1 -1
- voice_mode/frontend/.next/standalone/.next/server/app/_not-found.html +1 -1
- voice_mode/frontend/.next/standalone/.next/server/app/_not-found.rsc +1 -1
- voice_mode/frontend/.next/standalone/.next/server/app/api/connection-details/route.js +2 -2
- voice_mode/frontend/.next/standalone/.next/server/app/favicon.ico/route.js +2 -2
- voice_mode/frontend/.next/standalone/.next/server/app/index.html +1 -1
- voice_mode/frontend/.next/standalone/.next/server/app/index.rsc +2 -2
- voice_mode/frontend/.next/standalone/.next/server/app/page.js +2 -2
- voice_mode/frontend/.next/standalone/.next/server/app/page_client-reference-manifest.js +1 -1
- voice_mode/frontend/.next/standalone/.next/server/chunks/994.js +2 -2
- voice_mode/frontend/.next/standalone/.next/server/middleware-build-manifest.js +1 -1
- voice_mode/frontend/.next/standalone/.next/server/next-font-manifest.js +1 -1
- voice_mode/frontend/.next/standalone/.next/server/next-font-manifest.json +1 -1
- voice_mode/frontend/.next/standalone/.next/server/pages/404.html +1 -1
- voice_mode/frontend/.next/standalone/.next/server/pages/500.html +1 -1
- voice_mode/frontend/.next/standalone/.next/server/server-reference-manifest.json +1 -1
- voice_mode/frontend/.next/standalone/server.js +1 -1
- voice_mode/frontend/.next/static/chunks/app/{layout-0668b54a9a8a3d88.js → layout-8365e4b0b502e273.js} +1 -1
- voice_mode/frontend/.next/static/chunks/app/{page-b2295ed8502c5617.js → page-56c513d637021d6c.js} +1 -1
- voice_mode/frontend/.next/static/chunks/main-app-c2e7596a7588072c.js +1 -0
- voice_mode/frontend/.next/trace +43 -43
- voice_mode/frontend/.next/types/app/api/connection-details/route.ts +1 -1
- voice_mode/frontend/.next/types/app/layout.ts +1 -1
- voice_mode/frontend/.next/types/app/page.ts +1 -1
- voice_mode/tools/services/kokoro/install.py +207 -2
- voice_mode/tools/services/whisper/install.py +331 -187
- {voice_mode-2.33.3.dist-info → voice_mode-2.34.0.dist-info}/METADATA +1 -1
- {voice_mode-2.33.3.dist-info → voice_mode-2.34.0.dist-info}/RECORD +63 -63
- voice_mode/frontend/.next/static/chunks/main-app-78bb3214ad53942b.js +0 -1
- /voice_mode/frontend/.next/static/{G6vnbmBPF9PSjWGgrWUux → VjuJp4LAYPX7t1KdLNI3o}/_buildManifest.js +0 -0
- /voice_mode/frontend/.next/static/{G6vnbmBPF9PSjWGgrWUux → VjuJp4LAYPX7t1KdLNI3o}/_ssgManifest.js +0 -0
- {voice_mode-2.33.3.dist-info → voice_mode-2.34.0.dist-info}/WHEEL +0 -0
- {voice_mode-2.33.3.dist-info → voice_mode-2.34.0.dist-info}/entry_points.txt +0 -0
--- voice_mode/tools/services/whisper/install.py (2.33.3)
+++ voice_mode/tools/services/whisper/install.py (2.34.0)
@@ -30,6 +30,257 @@ from voice_mode.utils.gpu_detection import detect_gpu
 logger = logging.getLogger("voice-mode")
 
 
+async def update_whisper_service_files(
+    install_dir: str,
+    voicemode_dir: str,
+    auto_enable: Optional[bool] = None
+) -> Dict[str, Any]:
+    """Update service files (plist/systemd) for whisper service.
+
+    This function updates the service files without reinstalling whisper itself.
+    It ensures paths are properly expanded and templates are up to date.
+
+    Returns:
+        Dict with success status and details about what was updated
+    """
+    system = platform.system()
+    result = {"success": False, "updated": False}
+
+    # Create bin directory if it doesn't exist
+    bin_dir = os.path.join(install_dir, "bin")
+    os.makedirs(bin_dir, exist_ok=True)
+
+    # Create/update start script
+    logger.info("Updating whisper-server start script...")
+
+    # Load template script
+    template_content = None
+    source_template = Path(__file__).parent.parent.parent.parent / "templates" / "scripts" / "start-whisper-server.sh"
+    if source_template.exists():
+        logger.info(f"Loading template from source: {source_template}")
+        template_content = source_template.read_text()
+    else:
+        try:
+            template_resource = files("voice_mode.templates.scripts").joinpath("start-whisper-server.sh")
+            template_content = template_resource.read_text()
+            logger.info("Loaded template from package resources")
+        except Exception as e:
+            logger.warning(f"Failed to load template script: {e}. Using fallback inline script.")
+
+    # Use fallback inline script if template not found
+    if template_content is None:
+        template_content = f"""#!/bin/bash
+
+# Whisper Service Startup Script
+# This script is used by both macOS (launchd) and Linux (systemd) to start the whisper service
+# It sources the voicemode.env file to get configuration, especially VOICEMODE_WHISPER_MODEL
+
+# Determine whisper directory (script is in bin/, whisper root is parent)
+SCRIPT_DIR="$(cd "$(dirname "${{BASH_SOURCE[0]}}")" && pwd)"
+WHISPER_DIR="$(dirname "$SCRIPT_DIR")"
+
+# Voicemode configuration directory
+VOICEMODE_DIR="$HOME/.voicemode"
+LOG_DIR="$VOICEMODE_DIR/logs/whisper"
+
+# Create log directory if it doesn't exist
+mkdir -p "$LOG_DIR"
+
+# Log file for this script (separate from whisper server logs)
+STARTUP_LOG="$LOG_DIR/startup.log"
+
+# Source voicemode configuration if it exists
+if [ -f "$VOICEMODE_DIR/voicemode.env" ]; then
+    echo "[$(date '+%Y-%m-%d %H:%M:%S')] Sourcing voicemode.env" >> "$STARTUP_LOG"
+    source "$VOICEMODE_DIR/voicemode.env"
+else
+    echo "[$(date '+%Y-%m-%d %H:%M:%S')] Warning: voicemode.env not found" >> "$STARTUP_LOG"
+fi
+
+# Model selection with environment variable support
+MODEL_NAME="${{VOICEMODE_WHISPER_MODEL:-base}}"
+MODEL_PATH="$WHISPER_DIR/models/ggml-$MODEL_NAME.bin"
+
+echo "[$(date '+%Y-%m-%d %H:%M:%S')] Starting whisper-server with model: $MODEL_NAME" >> "$STARTUP_LOG"
+
+# Check if model exists
+if [ ! -f "$MODEL_PATH" ]; then
+    echo "[$(date '+%Y-%m-%d %H:%M:%S')] Error: Model $MODEL_NAME not found at $MODEL_PATH" >> "$STARTUP_LOG"
+    echo "[$(date '+%Y-%m-%d %H:%M:%S')] Available models:" >> "$STARTUP_LOG"
+    ls -1 "$WHISPER_DIR/models/" 2>/dev/null | grep "^ggml-.*\\.bin$" >> "$STARTUP_LOG"
+
+    # Try to find any available model as fallback
+    FALLBACK_MODEL=$(ls -1 "$WHISPER_DIR/models/" 2>/dev/null | grep "^ggml-.*\\.bin$" | head -1)
+    if [ -n "$FALLBACK_MODEL" ]; then
+        MODEL_PATH="$WHISPER_DIR/models/$FALLBACK_MODEL"
+        echo "[$(date '+%Y-%m-%d %H:%M:%S')] Using fallback model: $FALLBACK_MODEL" >> "$STARTUP_LOG"
+    else
+        echo "[$(date '+%Y-%m-%d %H:%M:%S')] Fatal: No whisper models found" >> "$STARTUP_LOG"
+        exit 1
+    fi
+fi
+
+# Port configuration (with environment variable support)
+WHISPER_PORT="${{VOICEMODE_WHISPER_PORT:-2022}}"
+
+# Determine server binary location
+# Check new CMake build location first, then legacy location
+if [ -f "$WHISPER_DIR/build/bin/whisper-server" ]; then
+    SERVER_BIN="$WHISPER_DIR/build/bin/whisper-server"
+elif [ -f "$WHISPER_DIR/server" ]; then
+    SERVER_BIN="$WHISPER_DIR/server"
+else
+    echo "[$(date '+%Y-%m-%d %H:%M:%S')] Error: whisper-server binary not found" >> "$STARTUP_LOG"
+    echo "[$(date '+%Y-%m-%d %H:%M:%S')] Checked: $WHISPER_DIR/build/bin/whisper-server" >> "$STARTUP_LOG"
+    echo "[$(date '+%Y-%m-%d %H:%M:%S')] Checked: $WHISPER_DIR/server" >> "$STARTUP_LOG"
+    exit 1
+fi
+
+echo "[$(date '+%Y-%m-%d %H:%M:%S')] Using binary: $SERVER_BIN" >> "$STARTUP_LOG"
+echo "[$(date '+%Y-%m-%d %H:%M:%S')] Model path: $MODEL_PATH" >> "$STARTUP_LOG"
+echo "[$(date '+%Y-%m-%d %H:%M:%S')] Port: $WHISPER_PORT" >> "$STARTUP_LOG"
+
+# Start whisper-server
+# Using exec to replace this script process with whisper-server
+cd "$WHISPER_DIR"
+exec "$SERVER_BIN" \\
+    --host 0.0.0.0 \\
+    --port "$WHISPER_PORT" \\
+    --model "$MODEL_PATH" \\
+    --inference-path /v1/audio/transcriptions \\
+    --threads 8
+"""
+
+    start_script_path = os.path.join(bin_dir, "start-whisper-server.sh")
+    with open(start_script_path, 'w') as f:
+        f.write(template_content)
+    os.chmod(start_script_path, 0o755)
+
+    # Update service files based on platform
+    if system == "Darwin":
+        logger.info("Updating launchagent for whisper-server...")
+        launchagents_dir = os.path.expanduser("~/Library/LaunchAgents")
+        os.makedirs(launchagents_dir, exist_ok=True)
+
+        # Create log directory
+        log_dir = os.path.join(voicemode_dir, 'logs', 'whisper')
+        os.makedirs(log_dir, exist_ok=True)
+
+        plist_name = "com.voicemode.whisper.plist"
+        plist_path = os.path.join(launchagents_dir, plist_name)
+
+        # Load plist template
+        source_template = Path(__file__).parent.parent.parent.parent / "templates" / "launchd" / "com.voicemode.whisper.plist"
+        if source_template.exists():
+            logger.info(f"Loading plist template from source: {source_template}")
+            plist_content = source_template.read_text()
+        else:
+            template_resource = files("voice_mode.templates.launchd").joinpath("com.voicemode.whisper.plist")
+            plist_content = template_resource.read_text()
+            logger.info("Loaded plist template from package resources")
+
+        # Replace placeholders with expanded paths
+        plist_content = plist_content.replace("{START_SCRIPT_PATH}", start_script_path)
+        plist_content = plist_content.replace("{LOG_DIR}", os.path.join(voicemode_dir, 'logs'))
+        plist_content = plist_content.replace("{INSTALL_DIR}", install_dir)
+
+        # Unload if already loaded (ignore errors)
+        try:
+            subprocess.run(["launchctl", "unload", plist_path], capture_output=True)
+        except:
+            pass
+
+        # Write updated plist
+        with open(plist_path, 'w') as f:
+            f.write(plist_content)
+
+        result["success"] = True
+        result["updated"] = True
+        result["plist_path"] = plist_path
+        result["start_script"] = start_script_path
+
+        # Handle auto_enable if specified
+        if auto_enable is None:
+            auto_enable = SERVICE_AUTO_ENABLE
+
+        if auto_enable:
+            logger.info("Auto-enabling whisper service...")
+            from voice_mode.tools.service import enable_service
+            enable_result = await enable_service("whisper")
+            if "✅" in enable_result:
+                result["enabled"] = True
+            else:
+                logger.warning(f"Auto-enable failed: {enable_result}")
+                result["enabled"] = False
+
+    elif system == "Linux":
+        logger.info("Updating systemd user service for whisper-server...")
+        systemd_user_dir = os.path.expanduser("~/.config/systemd/user")
+        os.makedirs(systemd_user_dir, exist_ok=True)
+
+        # Create log directory
+        log_dir = os.path.join(voicemode_dir, 'logs', 'whisper')
+        os.makedirs(log_dir, exist_ok=True)
+
+        service_name = "voicemode-whisper.service"
+        service_path = os.path.join(systemd_user_dir, service_name)
+
+        service_content = f"""[Unit]
+Description=Whisper.cpp Speech Recognition Server
+After=network.target
+
+[Service]
+Type=simple
+ExecStart={start_script_path}
+Restart=on-failure
+RestartSec=10
+WorkingDirectory={install_dir}
+StandardOutput=append:{os.path.join(voicemode_dir, 'logs', 'whisper', 'whisper.out.log')}
+StandardError=append:{os.path.join(voicemode_dir, 'logs', 'whisper', 'whisper.err.log')}
+Environment="PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/cuda/bin"
+
+[Install]
+WantedBy=default.target
+"""
+
+        with open(service_path, 'w') as f:
+            f.write(service_content)
+
+        # Reload systemd
+        try:
+            subprocess.run(["systemctl", "--user", "daemon-reload"], check=True)
+            result["success"] = True
+            result["updated"] = True
+            result["service_path"] = service_path
+            result["start_script"] = start_script_path
+        except subprocess.CalledProcessError as e:
+            logger.warning(f"Failed to reload systemd: {e}")
+            result["success"] = True  # Still consider it success if file was written
+            result["updated"] = True
+            result["service_path"] = service_path
+            result["start_script"] = start_script_path
+
+        # Handle auto_enable if specified
+        if auto_enable is None:
+            auto_enable = SERVICE_AUTO_ENABLE
+
+        if auto_enable:
+            logger.info("Auto-enabling whisper service...")
+            from voice_mode.tools.service import enable_service
+            enable_result = await enable_service("whisper")
+            if "✅" in enable_result:
+                result["enabled"] = True
+            else:
+                logger.warning(f"Auto-enable failed: {enable_result}")
+                result["enabled"] = False
+
+    else:
+        result["success"] = False
+        result["error"] = f"Unsupported platform: {system}"
+
+    return result
+
+
 @mcp.tool()
 async def whisper_install(
     install_dir: Optional[str] = None,
@@ -87,17 +338,39 @@ async def whisper_install(
 
     # Check if already installed
     if os.path.exists(install_dir) and not force_reinstall:
-        if os.path.exists(os.path.join(install_dir, "main")):
+        if os.path.exists(os.path.join(install_dir, "main")) or os.path.exists(os.path.join(install_dir, "build", "bin", "whisper-cli")):
             # Check if the requested version is already installed
             if is_version_installed(Path(install_dir), version):
                 current_version = get_current_version(Path(install_dir))
+
+                # Always update service files even if whisper is already installed
+                logger.info("Whisper is already installed, updating service files...")
+                service_update_result = await update_whisper_service_files(
+                    install_dir=install_dir,
+                    voicemode_dir=voicemode_dir,
+                    auto_enable=auto_enable
+                )
+
+                model_path = os.path.join(install_dir, "models", f"ggml-{model}.bin")
+
+                # Build response message
+                message = f"whisper.cpp version {current_version} already installed."
+                if service_update_result.get("updated"):
+                    message += " Service files updated."
+                if service_update_result.get("enabled"):
+                    message += " Service auto-enabled."
+
                 return {
                     "success": True,
                     "install_path": install_dir,
-                    "model_path":
+                    "model_path": model_path,
                     "already_installed": True,
+                    "service_files_updated": service_update_result.get("updated", False),
                     "version": current_version,
-                    "
+                    "plist_path": service_update_result.get("plist_path"),
+                    "service_path": service_update_result.get("service_path"),
+                    "start_script": service_update_result.get("start_script"),
+                    "message": message
                 }
 
     # Detect system
@@ -219,8 +492,7 @@ async def whisper_install(
         if is_macos:
             # On macOS, always enable Metal
             cmake_flags.append("-DGGML_METAL=ON")
-            # On Apple Silicon, also enable Core ML
-            # This allows using CoreML models if available, but falls back to Metal if not
+            # On Apple Silicon, also enable Core ML for better performance
             if platform.machine() == "arm64":
                 cmake_flags.append("-DWHISPER_COREML=ON")
                 cmake_flags.append("-DWHISPER_COREML_ALLOW_FALLBACK=ON")
@@ -308,93 +580,29 @@ async def whisper_install(
         if 'original_dir' in locals():
             os.chdir(original_dir)
 
-        #
-        logger.info("Installing
+        # Update service files (includes creating start script)
+        logger.info("Installing/updating service files...")
+        service_update_result = await update_whisper_service_files(
+            install_dir=install_dir,
+            voicemode_dir=voicemode_dir,
+            auto_enable=auto_enable
+        )
 
-
-
-
+        if not service_update_result.get("success"):
+            logger.error(f"Failed to update service files: {service_update_result.get('error', 'Unknown error')}")
+            return {
+                "success": False,
+                "error": f"Service file update failed: {service_update_result.get('error', 'Unknown error')}"
+            }
 
-        #
-
+        # Get the start script path from the result
+        start_script_path = service_update_result.get("start_script")
 
-        #
-        source_template = Path(__file__).parent.parent.parent / "templates" / "scripts" / "start-whisper-server.sh"
-        if source_template.exists():
-            logger.info(f"Loading template from source: {source_template}")
-            template_content = source_template.read_text()
-        else:
-            # Try loading from package resources
-            try:
-                template_resource = files("voice_mode.templates.scripts").joinpath("start-whisper-server.sh")
-                template_content = template_resource.read_text()
-                logger.info("Loaded template from package resources")
-            except Exception as e:
-                logger.warning(f"Failed to load template script: {e}. Using fallback inline script.")
-
-        # Create the start script (whether template was loaded from file or created inline)
-        start_script_path = os.path.join(bin_dir, "start-whisper-server.sh")
-        with open(start_script_path, 'w') as f:
-            f.write(template_content)
-        os.chmod(start_script_path, 0o755)
-
-        # Install launchagent on macOS
+        # Build return message based on results
         if system == "Darwin":
-            logger.info("Installing launchagent for whisper-server...")
-            launchagents_dir = os.path.expanduser("~/Library/LaunchAgents")
-            os.makedirs(launchagents_dir, exist_ok=True)
-
-            # Create log directory
-            log_dir = os.path.join(voicemode_dir, 'logs', 'whisper')
-            os.makedirs(log_dir, exist_ok=True)
-
-            plist_name = "com.voicemode.whisper.plist"
-            plist_path = os.path.join(launchagents_dir, plist_name)
-
-            # Load plist template
-            # First try to load from source if running in development
-            source_template = Path(__file__).parent.parent.parent / "templates" / "launchd" / "com.voicemode.whisper.plist"
-            if source_template.exists():
-                logger.info(f"Loading plist template from source: {source_template}")
-                plist_content = source_template.read_text()
-            else:
-                # Load from package resources
-                template_resource = files("voice_mode.templates.launchd").joinpath("com.voicemode.whisper.plist")
-                plist_content = template_resource.read_text()
-                logger.info("Loaded plist template from package resources")
-
-            # Replace placeholders
-            plist_content = plist_content.replace("{START_SCRIPT_PATH}", start_script_path)
-            plist_content = plist_content.replace("{LOG_DIR}", os.path.join(voicemode_dir, 'logs'))
-            plist_content = plist_content.replace("{INSTALL_DIR}", install_dir)
-
-            with open(plist_path, 'w') as f:
-                f.write(plist_content)
-
-            # Unload if already loaded (ignore errors)
-            try:
-                subprocess.run(["launchctl", "unload", plist_path], capture_output=True)
-            except:
-                pass # Ignore if not loaded
-
-            # Don't load here - let enable_service handle it with the -w flag
-            # This prevents the "already loaded" error when enable_service runs
-
-            # Handle auto_enable
-            enable_message = ""
-            if auto_enable is None:
-                auto_enable = SERVICE_AUTO_ENABLE
-
-            if auto_enable:
-                logger.info("Auto-enabling whisper service...")
-                from voice_mode.tools.service import enable_service
-                enable_result = await enable_service("whisper")
-                if "✅" in enable_result:
-                    enable_message = " Service auto-enabled."
-                else:
-                    logger.warning(f"Auto-enable failed: {enable_result}")
-
             current_version = get_current_version(Path(install_dir))
+            enable_message = " Service auto-enabled." if service_update_result.get("enabled") else ""
+
             return {
                 "success": True,
                 "install_path": install_dir,
@@ -410,117 +618,53 @@ async def whisper_install(
                     "server_port": 2022,
                     "server_url": "http://localhost:2022"
                 },
-                "launchagent": plist_path,
+                "launchagent": service_update_result.get("plist_path"),
                 "start_script": start_script_path,
                 "message": f"Successfully installed whisper.cpp {current_version} with {gpu_type} support and whisper-server on port 2022{enable_message}{' (' + migration_msg + ')' if migration_msg else ''}"
             }
+
         elif system == "Linux":
-            # Install systemd service on Linux
-            logger.info("Installing systemd user service for whisper-server...")
-            systemd_user_dir = os.path.expanduser("~/.config/systemd/user")
-            os.makedirs(systemd_user_dir, exist_ok=True)
-
-            # Create log directory
-            log_dir = os.path.join(voicemode_dir, 'logs', 'whisper')
-            os.makedirs(log_dir, exist_ok=True)
-
-            service_name = "voicemode-whisper.service"
-            service_path = os.path.join(systemd_user_dir, service_name)
-
-            service_content = f"""[Unit]
-Description=Whisper.cpp Speech Recognition Server
-After=network.target
-
-[Service]
-Type=simple
-ExecStart={start_script_path}
-Restart=on-failure
-RestartSec=10
-WorkingDirectory={install_dir}
-StandardOutput=append:{os.path.join(voicemode_dir, 'logs', 'whisper', 'whisper.out.log')}
-StandardError=append:{os.path.join(voicemode_dir, 'logs', 'whisper', 'whisper.err.log')}
-Environment="PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/cuda/bin"
-
-[Install]
-WantedBy=default.target
-"""
-
-            with open(service_path, 'w') as f:
-                f.write(service_content)
-
-            # Reload systemd and enable service
-            try:
-                subprocess.run(["systemctl", "--user", "daemon-reload"], check=True)
-                subprocess.run(["systemctl", "--user", "enable", service_name], check=True)
-                subprocess.run(["systemctl", "--user", "start", service_name], check=True)
-
-                systemd_enabled = True
-                systemd_message = "Systemd service installed and started"
-            except subprocess.CalledProcessError as e:
-                systemd_enabled = False
-                systemd_message = f"Systemd service created but not started: {e}"
-                logger.warning(systemd_message)
-
-            # Handle auto_enable
-            enable_message = ""
-            if auto_enable is None:
-                auto_enable = SERVICE_AUTO_ENABLE
-
-            if auto_enable:
-                logger.info("Auto-enabling whisper service...")
-                from voice_mode.tools.service import enable_service
-                enable_result = await enable_service("whisper")
-                if "✅" in enable_result:
-                    enable_message = " Service auto-enabled."
-                else:
-                    logger.warning(f"Auto-enable failed: {enable_result}")
-
             current_version = get_current_version(Path(install_dir))
+            enable_message = " Service auto-enabled." if service_update_result.get("enabled") else ""
+            systemd_message = "Systemd service installed"
+
             return {
                 "success": True,
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                "install_path": install_dir,
+                "model_path": model_path,
+                "gpu_enabled": use_gpu,
+                "gpu_type": gpu_type,
+                "version": current_version,
+                "performance_info": {
+                    "system": system,
+                    "gpu_acceleration": gpu_type,
+                    "model": model,
+                    "binary_path": main_path if 'main_path' in locals() else os.path.join(install_dir, "main"),
+                    "server_port": 2022,
+                    "server_url": "http://localhost:2022"
+                },
+                "systemd_service": service_update_result.get("service_path"),
+                "systemd_enabled": service_update_result.get("enabled", False),
+                "start_script": start_script_path,
+                "message": f"Successfully installed whisper.cpp {current_version} with {gpu_type} support. {systemd_message}{enable_message}{' (' + migration_msg + ')' if migration_msg else ''}"
             }
-
+
         else:
-            # Handle auto_enable for other systems (if we add Windows support later)
-            enable_message = ""
-            if auto_enable is None:
-                auto_enable = SERVICE_AUTO_ENABLE
-
-            if auto_enable:
-                logger.info("Auto-enable not supported on this platform")
-
             current_version = get_current_version(Path(install_dir))
             return {
                 "success": True,
-
-
-
-
-
-
-
-
-
-
-
-
+                "install_path": install_dir,
+                "model_path": model_path,
+                "gpu_enabled": use_gpu,
+                "gpu_type": gpu_type,
+                "version": current_version,
+                "performance_info": {
+                    "system": system,
+                    "gpu_acceleration": gpu_type,
+                    "model": model,
+                    "binary_path": main_path if 'main_path' in locals() else os.path.join(install_dir, "main")
+                },
+                "message": f"Successfully installed whisper.cpp {current_version} with {gpu_type} support{enable_message}{' (' + migration_msg + ')' if migration_msg else ''}"
             }
 
     except subprocess.CalledProcessError as e:
@@ -537,4 +681,4 @@ async def whisper_install(
         return {
             "success": False,
             "error": str(e)
-        }
+        }
--- voice_mode-2.33.3.dist-info/METADATA
+++ voice_mode-2.34.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: voice-mode
-Version: 2.33.3
+Version: 2.34.0
 Summary: VoiceMode - Voice interaction capabilities for AI assistants (formerly voice-mcp)
 Project-URL: Homepage, https://github.com/mbailey/voicemode
 Project-URL: Repository, https://github.com/mbailey/voicemode