voice-mode 2.27.0-py3-none-any.whl → 2.28.1-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- voice_mode/__version__.py +1 -1
- voice_mode/cli.py +152 -37
- voice_mode/cli_commands/exchanges.py +6 -0
- voice_mode/frontend/.next/BUILD_ID +1 -1
- voice_mode/frontend/.next/app-build-manifest.json +5 -5
- voice_mode/frontend/.next/build-manifest.json +3 -3
- voice_mode/frontend/.next/next-minimal-server.js.nft.json +1 -1
- voice_mode/frontend/.next/next-server.js.nft.json +1 -1
- voice_mode/frontend/.next/prerender-manifest.json +1 -1
- voice_mode/frontend/.next/required-server-files.json +1 -1
- voice_mode/frontend/.next/server/app/_not-found/page.js +1 -1
- voice_mode/frontend/.next/server/app/_not-found/page_client-reference-manifest.js +1 -1
- voice_mode/frontend/.next/server/app/_not-found.html +1 -1
- voice_mode/frontend/.next/server/app/_not-found.rsc +1 -1
- voice_mode/frontend/.next/server/app/api/connection-details/route.js +2 -2
- voice_mode/frontend/.next/server/app/favicon.ico/route.js +2 -2
- voice_mode/frontend/.next/server/app/index.html +1 -1
- voice_mode/frontend/.next/server/app/index.rsc +2 -2
- voice_mode/frontend/.next/server/app/page.js +2 -2
- voice_mode/frontend/.next/server/app/page_client-reference-manifest.js +1 -1
- voice_mode/frontend/.next/server/chunks/994.js +1 -1
- voice_mode/frontend/.next/server/middleware-build-manifest.js +1 -1
- voice_mode/frontend/.next/server/next-font-manifest.js +1 -1
- voice_mode/frontend/.next/server/next-font-manifest.json +1 -1
- voice_mode/frontend/.next/server/pages/404.html +1 -1
- voice_mode/frontend/.next/server/pages/500.html +1 -1
- voice_mode/frontend/.next/server/server-reference-manifest.json +1 -1
- voice_mode/frontend/.next/standalone/.next/BUILD_ID +1 -1
- voice_mode/frontend/.next/standalone/.next/app-build-manifest.json +5 -5
- voice_mode/frontend/.next/standalone/.next/build-manifest.json +3 -3
- voice_mode/frontend/.next/standalone/.next/prerender-manifest.json +1 -1
- voice_mode/frontend/.next/standalone/.next/required-server-files.json +1 -1
- voice_mode/frontend/.next/standalone/.next/server/app/_not-found/page.js +1 -1
- voice_mode/frontend/.next/standalone/.next/server/app/_not-found/page_client-reference-manifest.js +1 -1
- voice_mode/frontend/.next/standalone/.next/server/app/_not-found.html +1 -1
- voice_mode/frontend/.next/standalone/.next/server/app/_not-found.rsc +1 -1
- voice_mode/frontend/.next/standalone/.next/server/app/api/connection-details/route.js +2 -2
- voice_mode/frontend/.next/standalone/.next/server/app/favicon.ico/route.js +2 -2
- voice_mode/frontend/.next/standalone/.next/server/app/index.html +1 -1
- voice_mode/frontend/.next/standalone/.next/server/app/index.rsc +2 -2
- voice_mode/frontend/.next/standalone/.next/server/app/page.js +2 -2
- voice_mode/frontend/.next/standalone/.next/server/app/page_client-reference-manifest.js +1 -1
- voice_mode/frontend/.next/standalone/.next/server/chunks/994.js +1 -1
- voice_mode/frontend/.next/standalone/.next/server/middleware-build-manifest.js +1 -1
- voice_mode/frontend/.next/standalone/.next/server/next-font-manifest.js +1 -1
- voice_mode/frontend/.next/standalone/.next/server/next-font-manifest.json +1 -1
- voice_mode/frontend/.next/standalone/.next/server/pages/404.html +1 -1
- voice_mode/frontend/.next/standalone/.next/server/pages/500.html +1 -1
- voice_mode/frontend/.next/standalone/.next/server/server-reference-manifest.json +1 -1
- voice_mode/frontend/.next/standalone/server.js +1 -1
- voice_mode/frontend/.next/static/chunks/app/{layout-08be62ed6e344292.js → layout-2a1721553cbe58e4.js} +1 -1
- voice_mode/frontend/.next/static/chunks/app/page-fe35d9da20297c85.js +1 -0
- voice_mode/frontend/.next/static/chunks/{main-app-413f77c1f2c53e3f.js → main-app-c17195caa4e269d6.js} +1 -1
- voice_mode/frontend/.next/trace +43 -43
- voice_mode/frontend/.next/types/app/api/connection-details/route.ts +1 -1
- voice_mode/frontend/.next/types/app/layout.ts +1 -1
- voice_mode/frontend/.next/types/app/page.ts +1 -1
- voice_mode/frontend/package-lock.json +6 -6
- voice_mode/tools/converse.py +44 -24
- voice_mode/tools/service.py +30 -3
- voice_mode/tools/services/kokoro/install.py +1 -1
- voice_mode/tools/services/whisper/__init__.py +15 -5
- voice_mode/tools/services/whisper/install.py +41 -9
- voice_mode/tools/services/whisper/list_models.py +14 -14
- voice_mode/tools/services/whisper/model_active.py +54 -0
- voice_mode/tools/services/whisper/model_benchmark.py +159 -0
- voice_mode/tools/services/whisper/{download_model.py → model_install.py} +72 -11
- voice_mode/tools/services/whisper/model_remove.py +36 -0
- voice_mode/tools/services/whisper/models.py +225 -26
- voice_mode/utils/services/whisper_helpers.py +206 -19
- voice_mode/utils/services/whisper_version.py +138 -0
- {voice_mode-2.27.0.dist-info → voice_mode-2.28.1.dist-info}/METADATA +5 -1
- {voice_mode-2.27.0.dist-info → voice_mode-2.28.1.dist-info}/RECORD +77 -74
- voice_mode/frontend/.next/static/chunks/app/page-80fc72669f25298f.js +0 -1
- voice_mode/tools/services/whisper/list_models_tool.py +0 -65
- /voice_mode/frontend/.next/static/{wQ5pxzPmwjlzdUfJwSjMg → LhJalgfazyY_l3L_v0_Kw}/_buildManifest.js +0 -0
- /voice_mode/frontend/.next/static/{wQ5pxzPmwjlzdUfJwSjMg → LhJalgfazyY_l3L_v0_Kw}/_ssgManifest.js +0 -0
- {voice_mode-2.27.0.dist-info → voice_mode-2.28.1.dist-info}/WHEEL +0 -0
- {voice_mode-2.27.0.dist-info → voice_mode-2.28.1.dist-info}/entry_points.txt +0 -0
voice_mode/tools/services/whisper/{download_model.py → model_install.py}

@@ -14,7 +14,7 @@ logger = logging.getLogger("voice-mode")
 
 
 @mcp.tool()
-async def download_model(
+async def whisper_model_install(
     model: Union[str, List[str]] = "large-v2",
     force_download: Union[bool, str] = False,
     skip_core_ml: Union[bool, str] = False
@@ -109,11 +109,32 @@ async def download_model(
                 force_download=force_download
             )
 
-            results.append({
+            # Build comprehensive result entry
+            model_result = {
                 "model": model_name,
-                "success": result["success"],
-                "message": result.get("message", result.get("error", "Unknown error"))
-            })
+                "download_success": result["success"],
+                "message": result.get("message", result.get("error", "Unknown error")),
+                "acceleration": result.get("acceleration", "unknown")
+            }
+
+            # Include Core ML status if available
+            if "core_ml_status" in result and not skip_core_ml:
+                core_ml = result["core_ml_status"]
+                model_result["core_ml"] = {
+                    "attempted": True,
+                    "success": core_ml.get("success", False),
+                    "error_category": core_ml.get("error_category") if not core_ml.get("success") else None,
+                    "error": core_ml.get("error") if not core_ml.get("success") else None,
+                    "fix_command": core_ml.get("install_command") if not core_ml.get("success") else None,
+                    "package_size": core_ml.get("package_size") if not core_ml.get("success") else None
+                }
+            elif skip_core_ml:
+                model_result["core_ml"] = {
+                    "attempted": False,
+                    "reason": "Skipped by user request"
+                }
+
+            results.append(model_result)
 
             if result["success"]:
                 success_count += 1
@@ -121,18 +142,58 @@ async def download_model(
         # Summary
         total_models = len(models_to_download)
 
-        summary = {
+        # Collect warnings and recommendations
+        warnings = []
+        recommendations = []
+
+        # Check for Core ML issues
+        core_ml_failures = [r for r in results if r.get("core_ml", {}).get("attempted") and not r.get("core_ml", {}).get("success")]
+        if core_ml_failures:
+            # Group by error category
+            error_categories = {}
+            for failure in core_ml_failures:
+                category = failure["core_ml"].get("error_category", "unknown")
+                if category not in error_categories:
+                    error_categories[category] = failure["core_ml"]
+
+            # Add warnings for each category
+            if "missing_pytorch" in error_categories:
+                warnings.append("PyTorch not installed - Core ML acceleration unavailable")
+                recommendations.append(f"Install PyTorch for Core ML: {error_categories['missing_pytorch'].get('fix_command', 'uv pip install torch')}")
+            elif "missing_coremltools" in error_categories:
+                warnings.append("CoreMLTools not installed - Core ML acceleration unavailable")
+                recommendations.append(f"Install CoreMLTools: {error_categories['missing_coremltools'].get('fix_command', 'uv pip install coremltools')}")
+
+            # General Core ML recommendation
+            if len(core_ml_failures) == len(results):
+                recommendations.append("Models will use Metal acceleration. Core ML provides better performance on Apple Silicon.")
+
+        summary = {
             "success": success_count > 0,
             "models_directory": str(actual_models_dir),
             "total_requested": total_models,
             "successful_downloads": success_count,
             "failed_downloads": total_models - success_count,
             "results": results,
-            "
-
-
-
-
+            "core_ml_available": not skip_core_ml and os.uname().machine == "arm64",
+        }
+
+        # Add warnings and recommendations if present
+        if warnings:
+            summary["warnings"] = warnings
+        if recommendations:
+            summary["recommendations"] = recommendations
+
+        # Add overall status message
+        if success_count == total_models:
+            if core_ml_failures:
+                summary["status"] = "Models downloaded successfully but Core ML conversion failed. Using Metal acceleration."
+            else:
+                summary["status"] = "Models downloaded and converted successfully. Ready to use with whisper_start."
+        else:
+            summary["status"] = "Some models failed to download. Check the results for details."
+
+        return json.dumps(summary, indent=2)
 
     except Exception as e:
         logger.error(f"Error in download_model: {e}")
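The restructured per-model results and summary are easiest to see from the consumer side. A minimal sketch of handling the JSON string this tool returns; the report_install helper is hypothetical, but the field names are taken from the hunks above:

```python
import json

def report_install(summary_json: str) -> None:
    """Print a human-readable report from a whisper_model_install summary."""
    summary = json.loads(summary_json)
    print(summary["status"])
    for warning in summary.get("warnings", []):
        print(f"warning: {warning}")
    for rec in summary.get("recommendations", []):
        print(f"hint: {rec}")
    for entry in summary["results"]:
        core_ml = entry.get("core_ml", {})
        if core_ml.get("attempted") and not core_ml.get("success"):
            # fix_command carries the suggested install command, if any
            print(f"{entry['model']}: Core ML failed "
                  f"({core_ml.get('error_category')}), try: {core_ml.get('fix_command')}")
```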
voice_mode/tools/services/whisper/model_remove.py (new file)

@@ -0,0 +1,36 @@
+"""MCP tool for removing Whisper models."""
+
+from typing import Dict, Any
+from voice_mode.tools.services.whisper.models import (
+    remove_whisper_model,
+    get_active_model,
+    format_size
+)
+
+
+async def whisper_model_remove(model_name: str, remove_coreml: bool = True) -> Dict[str, Any]:
+    """Remove an installed Whisper model.
+
+    Args:
+        model_name: Name of model to remove
+        remove_coreml: Also remove Core ML version if exists
+
+    Returns:
+        Dict with removal status
+    """
+    # Check if trying to remove active model
+    if model_name == get_active_model():
+        return {
+            "success": False,
+            "error": f"Cannot remove active model {model_name}. Set a different model as active first using whisper_model_active()"
+        }
+
+    # Remove the model
+    result = remove_whisper_model(model_name, remove_coreml)
+
+    # Format the result for better readability
+    if result["success"]:
+        result["space_freed_formatted"] = format_size(result.get("space_freed_mb", 0))
+        result["message"] = f"Successfully removed model {model_name}"
+
+    return result
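A minimal usage sketch for the new tool, assuming the coroutine defined above is awaited directly; the asyncio harness is illustrative and not part of the package:

```python
import asyncio

from voice_mode.tools.services.whisper.model_remove import whisper_model_remove

async def main() -> None:
    # The tool refuses to delete the active model; anything else is removed
    # and the freed space is reported via the formatted field shown above.
    result = await whisper_model_remove("tiny", remove_coreml=True)
    if result["success"]:
        print(result["message"], "-", result["space_freed_formatted"])
    else:
        print(result["error"])

asyncio.run(main())
```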
voice_mode/tools/services/whisper/models.py

@@ -1,8 +1,9 @@
 """Whisper model registry and utilities."""
 
 import os
+import warnings
 from pathlib import Path
-from typing import Dict, List, Optional, TypedDict
+from typing import Any, Dict, List, Optional, TypedDict
 from voice_mode.config import WHISPER_MODEL_PATH, WHISPER_MODEL
 
 
@@ -15,7 +16,7 @@ class ModelInfo(TypedDict):
 
 
 # Registry of all available Whisper models
-WHISPER_MODELS: Dict[str, ModelInfo] = {
+WHISPER_MODEL_REGISTRY: Dict[str, ModelInfo] = {
     "tiny": {
         "size_mb": 39,
         "languages": "Multilingual",
@@ -105,32 +106,32 @@ def get_model_directory() -> Path:
     return model_dir
 
 
-def get_current_model() -> str:
+def get_active_model() -> str:
     """Get the currently selected Whisper model."""
     # Use the configured model from config.py
     model = WHISPER_MODEL
 
     # Validate it's a known model
-    if model not in WHISPER_MODELS:
+    if model not in WHISPER_MODEL_REGISTRY:
         return "large-v2" # Default fallback
 
     return model
 
 
-def is_model_installed(model_name: str) -> bool:
-    """Check if a model is installed."""
-    if model_name not in WHISPER_MODELS:
+def is_whisper_model_installed(model_name: str) -> bool:
+    """Check if a Whisper model is installed."""
+    if model_name not in WHISPER_MODEL_REGISTRY:
         return False
 
     model_dir = get_model_directory()
-    model_info = WHISPER_MODELS[model_name]
+    model_info = WHISPER_MODEL_REGISTRY[model_name]
     model_path = model_dir / model_info["filename"]
 
     return model_path.exists()
 
 
-def has_coreml_model(model_name: str) -> bool:
-    """Check if a Core ML model is available for the given model.
+def has_whisper_coreml_model(model_name: str) -> bool:
+    """Check if a Core ML model is available for the given Whisper model.
 
     Core ML models are only used on macOS with Apple Silicon.
     They have the extension .mlmodelc and provide faster inference.
@@ -141,23 +142,25 @@ def has_coreml_model(model_name: str) -> bool:
     if platform.system() != "Darwin":
         return False
 
-    if model_name not in WHISPER_MODELS:
+    if model_name not in WHISPER_MODEL_REGISTRY:
         return False
 
     model_dir = get_model_directory()
-    model_info = WHISPER_MODELS[model_name]
+    model_info = WHISPER_MODEL_REGISTRY[model_name]
 
-    # Core ML
-
+    # Core ML models can be either compiled (.mlmodelc) or package (.mlpackage)
+    # Check for both formats
+    coreml_compiled = model_dir / f"ggml-{model_name}-encoder.mlmodelc"
+    coreml_package = model_dir / f"coreml-encoder-{model_name}.mlpackage"
 
-    return
+    return coreml_compiled.exists() or coreml_package.exists()
 
 
-def get_installed_models() -> List[str]:
-    """Get list of installed models."""
+def get_installed_whisper_models() -> List[str]:
+    """Get list of installed Whisper models."""
     installed = []
-    for model_name in WHISPER_MODELS:
-        if is_model_installed(model_name):
+    for model_name in WHISPER_MODEL_REGISTRY:
+        if is_whisper_model_installed(model_name):
             installed.append(model_name)
     return installed
 
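Taken together, the renamed helpers support a simple inventory check; a sketch using only the functions shown above:

```python
from voice_mode.tools.services.whisper.models import (
    get_installed_whisper_models,
    has_whisper_coreml_model,
)

# Hypothetical report: tag each installed model with whether a Core ML
# encoder (.mlmodelc or .mlpackage, per the check above) sits next to it.
for name in get_installed_whisper_models():
    accel = "Core ML" if has_whisper_coreml_model(name) else "Metal/CPU"
    print(f"{name}: {accel}")
```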
@@ -172,12 +175,12 @@ def get_total_size(models: Optional[List[str]] = None) -> int:
         Total size in MB
     """
     if models is None:
-        models = list(WHISPER_MODELS.keys())
+        models = list(WHISPER_MODEL_REGISTRY.keys())
 
     total = 0
     for model in models:
-        if model in WHISPER_MODELS:
-            total += WHISPER_MODELS[model]["size_mb"]
+        if model in WHISPER_MODEL_REGISTRY:
+            total += WHISPER_MODEL_REGISTRY[model]["size_mb"]
 
     return total
 
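get_total_size() keeps its behavior under the renamed registry: unknown names are skipped by the membership check, and sizes are summed in MB. A small sketch; only "tiny" at 39 MB is visible in this diff, and "large-v2" is the default referenced above:

```python
from voice_mode.tools.services.whisper.models import get_total_size

print(get_total_size())                      # every model in WHISPER_MODEL_REGISTRY
print(get_total_size(["tiny", "large-v2"]))  # 39 MB plus the large-v2 entry
print(get_total_size(["no-such-model"]))     # 0: unknown names are skipped
```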
@@ -203,8 +206,8 @@ def is_apple_silicon() -> bool:
     return platform.system() == "Darwin" and platform.machine() == "arm64"
 
 
-def set_current_model(model_name: str) -> None:
-    """Set the
+def set_active_model(model_name: str) -> None:
+    """Set the active Whisper model.
 
     Args:
         model_name: Name of the model to set as active
@@ -215,7 +218,7 @@ def set_current_model(model_name: str) -> None:
     import re
 
     # Configuration file path
-    config_path = Path.home() / ".voicemode" / "
+    config_path = Path.home() / ".voicemode" / "voicemode.env"
 
     # Ensure directory exists
     config_path.parent.mkdir(parents=True, exist_ok=True)
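set_active_model() persists the selection by rewriting ~/.voicemode/voicemode.env in place. A sketch for inspecting the result; the exact key name is not visible in this hunk, so matching on WHISPER_MODEL below is an assumption:

```python
from pathlib import Path

# Illustrative only: after set_active_model("small"), the env file should
# carry the model selection as a KEY=value line.
env_path = Path.home() / ".voicemode" / "voicemode.env"
if env_path.exists():
    for line in env_path.read_text().splitlines():
        if "WHISPER_MODEL" in line:  # assumed key fragment
            print(line)
```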
@@ -271,4 +274,200 @@ def set_current_model(model_name: str) -> None:
 
     # Write the updated configuration
     with open(config_path, 'w') as f:
-        f.writelines(lines)
+        f.writelines(lines)
+
+
+def remove_whisper_model(model_name: str, remove_coreml: bool = True) -> Dict[str, Any]:
+    """Remove a whisper model and optionally its Core ML version.
+
+    Args:
+        model_name: Name of the model to remove
+        remove_coreml: Also remove Core ML version if it exists
+
+    Returns:
+        Dict with success status and space freed
+    """
+    model_dir = get_model_directory()
+
+    if model_name not in WHISPER_MODEL_REGISTRY:
+        return {"success": False, "error": f"Model {model_name} not recognized"}
+
+    model_info = WHISPER_MODEL_REGISTRY[model_name]
+    model_file = model_dir / model_info["filename"]
+
+    if not model_file.exists():
+        return {"success": False, "error": f"Model {model_name} not found"}
+
+    space_freed = model_file.stat().st_size
+    model_file.unlink()
+
+    if remove_coreml and has_whisper_coreml_model(model_name):
+        # Remove both possible Core ML formats
+        coreml_compiled = model_dir / f"ggml-{model_name}-encoder.mlmodelc"
+        coreml_package = model_dir / f"coreml-encoder-{model_name}.mlpackage"
+
+        if coreml_compiled.exists():
+            import shutil
+            shutil.rmtree(coreml_compiled)
+            # Estimate size since it's a directory
+            space_freed += 100 * 1024 * 1024  # Approximate 100MB
+
+        if coreml_package.exists():
+            import shutil
+            shutil.rmtree(coreml_package)
+            space_freed += 100 * 1024 * 1024  # Approximate 100MB
+
+    return {
+        "success": True,
+        "model": model_name,
+        "space_freed": space_freed,
+        "space_freed_mb": space_freed // (1024 * 1024)
+    }
+
+
+def benchmark_whisper_model(model_name: str, sample_file: Optional[str] = None) -> Dict[str, Any]:
+    """Run performance benchmark on a whisper model.
+
+    Args:
+        model_name: Name of the model to benchmark
+        sample_file: Optional audio file to use (defaults to JFK sample)
+
+    Returns:
+        Dict with benchmark results
+    """
+    import subprocess
+    import re
+    from pathlib import Path
+
+    if not is_whisper_model_installed(model_name):
+        return {
+            "success": False,
+            "error": f"Model {model_name} is not installed"
+        }
+
+    # Find whisper-cli binary
+    whisper_bin = Path.home() / ".voicemode" / "services" / "whisper" / "build" / "bin" / "whisper-cli"
+    if not whisper_bin.exists():
+        return {
+            "success": False,
+            "error": "Whisper CLI not found. Please install whisper.cpp first."
+        }
+
+    # Use sample file or default JFK sample
+    if sample_file is None:
+        sample_file = Path.home() / ".voicemode" / "services" / "whisper" / "samples" / "jfk.wav"
+        if not sample_file.exists():
+            return {
+                "success": False,
+                "error": "Default sample file not found"
+            }
+
+    model_dir = get_model_directory()
+    model_info = WHISPER_MODEL_REGISTRY[model_name]
+    model_path = model_dir / model_info["filename"]
+
+    # Run benchmark
+    try:
+        result = subprocess.run(
+            [
+                str(whisper_bin),
+                "--model", str(model_path),
+                "--file", str(sample_file),
+                "--threads", "8",
+                "--beam-size", "1"
+            ],
+            capture_output=True,
+            text=True,
+            timeout=60
+        )
+
+        # Parse timing information
+        output = result.stderr + result.stdout
+
+        # Extract timings using regex
+        encode_match = re.search(r'encode time\s*=\s*([\d.]+)\s*ms', output)
+        total_match = re.search(r'total time\s*=\s*([\d.]+)\s*ms', output)
+        load_match = re.search(r'load time\s*=\s*([\d.]+)\s*ms', output)
+
+        encode_time = float(encode_match.group(1)) if encode_match else 0
+        total_time = float(total_match.group(1)) if total_match else 0
+        load_time = float(load_match.group(1)) if load_match else 0
+
+        # Calculate real-time factor (11 seconds for JFK sample)
+        rtf = 11000 / total_time if total_time > 0 else 0
+
+        return {
+            "success": True,
+            "model": model_name,
+            "load_time_ms": load_time,
+            "encode_time_ms": encode_time,
+            "total_time_ms": total_time,
+            "real_time_factor": round(rtf, 1),
+            "sample_duration_s": 11.0
+        }
+
+    except subprocess.TimeoutExpired:
+        return {
+            "success": False,
+            "error": "Benchmark timed out"
+        }
+    except Exception as e:
+        return {
+            "success": False,
+            "error": str(e)
+        }
+
+
+# Backwards compatibility - deprecated functions
+def get_current_model() -> str:
+    """DEPRECATED: Use get_active_model() instead."""
+    warnings.warn(
+        "get_current_model() is deprecated. Use get_active_model() instead.",
+        DeprecationWarning,
+        stacklevel=2
+    )
+    return get_active_model()
+
+
+def set_current_model(model_name: str) -> None:
+    """DEPRECATED: Use set_active_model() instead."""
+    warnings.warn(
+        "set_current_model() is deprecated. Use set_active_model() instead.",
+        DeprecationWarning,
+        stacklevel=2
+    )
+    return set_active_model(model_name)
+
+
+def is_model_installed(model_name: str) -> bool:
+    """DEPRECATED: Use is_whisper_model_installed() instead."""
+    warnings.warn(
+        "is_model_installed() is deprecated. Use is_whisper_model_installed() instead.",
+        DeprecationWarning,
+        stacklevel=2
+    )
+    return is_whisper_model_installed(model_name)
+
+
+def get_installed_models() -> List[str]:
+    """DEPRECATED: Use get_installed_whisper_models() instead."""
+    warnings.warn(
+        "get_installed_models() is deprecated. Use get_installed_whisper_models() instead.",
+        DeprecationWarning,
+        stacklevel=2
+    )
+    return get_installed_whisper_models()
+
+
+def has_coreml_model(model_name: str) -> bool:
+    """DEPRECATED: Use has_whisper_coreml_model() instead."""
+    warnings.warn(
+        "has_coreml_model() is deprecated. Use has_whisper_coreml_model() instead.",
+        DeprecationWarning,
+        stacklevel=2
+    )
+    return has_whisper_coreml_model(model_name)
+
+
+# Also provide WHISPER_MODELS as alias for backwards compatibility
+WHISPER_MODELS = WHISPER_MODEL_REGISTRY
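Each deprecated wrapper forwards to its replacement and emits a DeprecationWarning, so old imports keep working while callers are steered to the new names. A sketch of the observable behavior, assuming voice-mode is installed:

```python
import warnings

from voice_mode.tools.services.whisper.models import (
    get_active_model,
    get_current_model,  # deprecated alias, per the shim above
)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    model = get_current_model()         # old name still resolves...
    assert model == get_active_model()  # ...to the same answer
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
print(f"active model: {model}")
```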
|