hanzo-mcp 0.5.0__py3-none-any.whl → 0.5.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of hanzo-mcp might be problematic. Click here for more details.
- hanzo_mcp/__init__.py +1 -1
- hanzo_mcp/config/settings.py +61 -0
- hanzo_mcp/tools/__init__.py +158 -12
- hanzo_mcp/tools/common/base.py +7 -2
- hanzo_mcp/tools/common/config_tool.py +396 -0
- hanzo_mcp/tools/common/stats.py +261 -0
- hanzo_mcp/tools/common/tool_disable.py +144 -0
- hanzo_mcp/tools/common/tool_enable.py +182 -0
- hanzo_mcp/tools/common/tool_list.py +263 -0
- hanzo_mcp/tools/database/__init__.py +71 -0
- hanzo_mcp/tools/database/database_manager.py +246 -0
- hanzo_mcp/tools/database/graph_add.py +257 -0
- hanzo_mcp/tools/database/graph_query.py +536 -0
- hanzo_mcp/tools/database/graph_remove.py +267 -0
- hanzo_mcp/tools/database/graph_search.py +348 -0
- hanzo_mcp/tools/database/graph_stats.py +345 -0
- hanzo_mcp/tools/database/sql_query.py +229 -0
- hanzo_mcp/tools/database/sql_search.py +296 -0
- hanzo_mcp/tools/database/sql_stats.py +254 -0
- hanzo_mcp/tools/editor/__init__.py +11 -0
- hanzo_mcp/tools/editor/neovim_command.py +272 -0
- hanzo_mcp/tools/editor/neovim_edit.py +290 -0
- hanzo_mcp/tools/editor/neovim_session.py +356 -0
- hanzo_mcp/tools/filesystem/__init__.py +20 -1
- hanzo_mcp/tools/filesystem/batch_search.py +812 -0
- hanzo_mcp/tools/filesystem/find_files.py +348 -0
- hanzo_mcp/tools/filesystem/git_search.py +505 -0
- hanzo_mcp/tools/llm/__init__.py +27 -0
- hanzo_mcp/tools/llm/consensus_tool.py +351 -0
- hanzo_mcp/tools/llm/llm_manage.py +413 -0
- hanzo_mcp/tools/llm/llm_tool.py +346 -0
- hanzo_mcp/tools/llm/provider_tools.py +412 -0
- hanzo_mcp/tools/mcp/__init__.py +11 -0
- hanzo_mcp/tools/mcp/mcp_add.py +263 -0
- hanzo_mcp/tools/mcp/mcp_remove.py +127 -0
- hanzo_mcp/tools/mcp/mcp_stats.py +165 -0
- hanzo_mcp/tools/shell/__init__.py +27 -7
- hanzo_mcp/tools/shell/logs.py +265 -0
- hanzo_mcp/tools/shell/npx.py +194 -0
- hanzo_mcp/tools/shell/npx_background.py +254 -0
- hanzo_mcp/tools/shell/pkill.py +262 -0
- hanzo_mcp/tools/shell/processes.py +279 -0
- hanzo_mcp/tools/shell/run_background.py +326 -0
- hanzo_mcp/tools/shell/uvx.py +187 -0
- hanzo_mcp/tools/shell/uvx_background.py +249 -0
- hanzo_mcp/tools/vector/__init__.py +21 -12
- hanzo_mcp/tools/vector/ast_analyzer.py +459 -0
- hanzo_mcp/tools/vector/git_ingester.py +485 -0
- hanzo_mcp/tools/vector/index_tool.py +358 -0
- hanzo_mcp/tools/vector/infinity_store.py +465 -1
- hanzo_mcp/tools/vector/mock_infinity.py +162 -0
- hanzo_mcp/tools/vector/vector_index.py +7 -6
- hanzo_mcp/tools/vector/vector_search.py +22 -7
- {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/METADATA +68 -20
- hanzo_mcp-0.5.2.dist-info/RECORD +106 -0
- hanzo_mcp-0.5.0.dist-info/RECORD +0 -63
- {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/WHEEL +0 -0
- {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/entry_points.txt +0 -0
- {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/licenses/LICENSE +0 -0
- {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,413 @@
|
|
|
1
|
+
"""LLM management tool for enabling/disabling LLM providers."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import json
|
|
5
|
+
from typing import Annotated, Optional, TypedDict, Unpack, final, override
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
from fastmcp import Context as MCPContext
|
|
9
|
+
from pydantic import Field
|
|
10
|
+
|
|
11
|
+
from hanzo_mcp.tools.common.base import BaseTool
|
|
12
|
+
from hanzo_mcp.tools.common.context import create_tool_context
|
|
13
|
+
from hanzo_mcp.tools.llm.llm_tool import LLMTool
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# Pydantic-annotated parameter types; descriptions feed the MCP tool schema.

# Management action to perform; validated against the known set in call().
Action = Annotated[
    str,
    Field(
        description="Action to perform: list, enable, disable, test",
        min_length=1,
    ),
]

# Target provider name; required by enable/disable, optional elsewhere.
Provider = Annotated[
    Optional[str],
    Field(
        description="Provider name (for enable/disable/test actions)",
        default=None,
    ),
]

# Specific model identifier exercised by the "test" action.
Model = Annotated[
    Optional[str],
    Field(
        description="Model to test (for test action)",
        default=None,
    ),
]
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class LLMManageParams(TypedDict, total=False):
    """Parameters for LLM management tool."""

    # Action to perform: "list", "models", "enable", "disable", or "test".
    action: str
    # Provider name; required by enable/disable, optional for models/test.
    provider: Optional[str]
    # Model identifier used by the "test" action.
    model: Optional[str]
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
@final
|
|
50
|
+
class LLMManageTool(BaseTool):
|
|
51
|
+
"""Tool for managing LLM providers."""
|
|
52
|
+
|
|
53
|
+
def __init__(self):
|
|
54
|
+
"""Initialize the LLM management tool."""
|
|
55
|
+
self.llm_tool = LLMTool()
|
|
56
|
+
self.config_file = Path.home() / ".hanzo" / "llm" / "providers.json"
|
|
57
|
+
self.config_file.parent.mkdir(parents=True, exist_ok=True)
|
|
58
|
+
self._load_config()
|
|
59
|
+
|
|
60
|
+
def _load_config(self):
|
|
61
|
+
"""Load provider configuration."""
|
|
62
|
+
if self.config_file.exists():
|
|
63
|
+
try:
|
|
64
|
+
with open(self.config_file, 'r') as f:
|
|
65
|
+
self.config = json.load(f)
|
|
66
|
+
except:
|
|
67
|
+
self.config = {"disabled_providers": []}
|
|
68
|
+
else:
|
|
69
|
+
self.config = {"disabled_providers": []}
|
|
70
|
+
|
|
71
|
+
def _save_config(self):
|
|
72
|
+
"""Save provider configuration."""
|
|
73
|
+
with open(self.config_file, 'w') as f:
|
|
74
|
+
json.dump(self.config, f, indent=2)
|
|
75
|
+
|
|
76
|
+
@property
|
|
77
|
+
@override
|
|
78
|
+
def name(self) -> str:
|
|
79
|
+
"""Get the tool name."""
|
|
80
|
+
return "llm_manage"
|
|
81
|
+
|
|
82
|
+
    @property
    @override
    def description(self) -> str:
        """Get the tool description.

        NOTE: the returned triple-quoted string is runtime data shown to MCP
        clients, not a docstring — its exact wording is user-facing behavior.
        """
        return """Manage LLM providers and test configurations.

Actions:
- list: Show all providers and their status
- models: List all available models (optionally filtered by provider)
- enable: Enable a provider's tools
- disable: Disable a provider's tools
- test: Test a model to verify it works

Examples:
- llm_manage --action list
- llm_manage --action models
- llm_manage --action models --provider openai
- llm_manage --action enable --provider openai
- llm_manage --action disable --provider perplexity
- llm_manage --action test --model "gpt-4"
- llm_manage --action test --provider groq --model "mixtral"

Providers are automatically detected based on environment variables:
- OpenAI: OPENAI_API_KEY
- Anthropic: ANTHROPIC_API_KEY or CLAUDE_API_KEY
- Google: GOOGLE_API_KEY or GEMINI_API_KEY
- Groq: GROQ_API_KEY
- Mistral: MISTRAL_API_KEY
- Perplexity: PERPLEXITY_API_KEY
- And many more...
"""
|
|
113
|
+
|
|
114
|
+
@override
|
|
115
|
+
async def call(
|
|
116
|
+
self,
|
|
117
|
+
ctx: MCPContext,
|
|
118
|
+
**params: Unpack[LLMManageParams],
|
|
119
|
+
) -> str:
|
|
120
|
+
"""Manage LLM providers.
|
|
121
|
+
|
|
122
|
+
Args:
|
|
123
|
+
ctx: MCP context
|
|
124
|
+
**params: Tool parameters
|
|
125
|
+
|
|
126
|
+
Returns:
|
|
127
|
+
Result of the management action
|
|
128
|
+
"""
|
|
129
|
+
tool_ctx = create_tool_context(ctx)
|
|
130
|
+
await tool_ctx.set_tool_info(self.name)
|
|
131
|
+
|
|
132
|
+
# Extract parameters
|
|
133
|
+
action = params.get("action")
|
|
134
|
+
if not action:
|
|
135
|
+
return "Error: action is required (list, enable, disable, test, models)"
|
|
136
|
+
|
|
137
|
+
provider = params.get("provider")
|
|
138
|
+
model = params.get("model")
|
|
139
|
+
|
|
140
|
+
# Handle different actions
|
|
141
|
+
if action == "list":
|
|
142
|
+
return self._list_providers()
|
|
143
|
+
elif action == "models":
|
|
144
|
+
return self._list_all_models(provider)
|
|
145
|
+
elif action == "enable":
|
|
146
|
+
return self._enable_provider(provider)
|
|
147
|
+
elif action == "disable":
|
|
148
|
+
return self._disable_provider(provider)
|
|
149
|
+
elif action == "test":
|
|
150
|
+
return await self._test_model(ctx, provider, model)
|
|
151
|
+
else:
|
|
152
|
+
return f"Error: Invalid action '{action}'. Must be one of: list, models, enable, disable, test"
|
|
153
|
+
|
|
154
|
+
    def _list_providers(self) -> str:
        """List all providers and their status.

        Builds a human-readable report that groups every provider known to
        ``LLMTool.API_KEY_ENV_VARS`` into three buckets: active (API key
        detected and not disabled), available-but-disabled (key detected but
        listed in the persisted ``disabled_providers``), and no-API-key.
        Ends with a summary and, when anything is active, the LLM tool names.
        """
        output = ["=== LLM Providers ==="]
        output.append("")

        # Get all possible providers
        all_providers = sorted(LLMTool.API_KEY_ENV_VARS.keys())
        # assumes available_providers maps provider name -> list of the env
        # vars that supplied its key — TODO confirm against LLMTool
        available_providers = self.llm_tool.available_providers
        disabled_providers = self.config.get("disabled_providers", [])

        # Categorize providers
        active = []
        available_but_disabled = []
        no_api_key = []

        for provider in all_providers:
            if provider in available_providers:
                if provider in disabled_providers:
                    available_but_disabled.append(provider)
                else:
                    active.append(provider)
            else:
                no_api_key.append(provider)

        # Show active providers
        if active:
            output.append("✅ Active Providers (API key found, enabled):")
            for provider in active:
                env_vars = available_providers.get(provider, [])
                output.append(f"  - {provider}: {', '.join(env_vars)}")

                # Show example models
                examples = self._get_example_models(provider)
                if examples:
                    output.append(f"    Models: {', '.join(examples[:3])}")
            output.append("")

        # Show disabled providers
        if available_but_disabled:
            output.append("⚠️ Available but Disabled (API key found, disabled):")
            for provider in available_but_disabled:
                env_vars = available_providers.get(provider, [])
                output.append(f"  - {provider}: {', '.join(env_vars)}")
                output.append(f"    Use: llm_manage --action enable --provider {provider}")
            output.append("")

        # Show providers without API keys
        if no_api_key:
            output.append("❌ No API Key Found:")
            for provider in no_api_key[:10]:  # Show first 10
                env_vars = LLMTool.API_KEY_ENV_VARS.get(provider, [])
                output.append(f"  - {provider}: Set one of {', '.join(env_vars)}")
            if len(no_api_key) > 10:
                output.append(f"  ... and {len(no_api_key) - 10} more")
            output.append("")

        # Summary
        output.append("=== Summary ===")
        output.append(f"Total providers: {len(all_providers)}")
        output.append(f"Active: {len(active)}")
        output.append(f"Disabled: {len(available_but_disabled)}")
        output.append(f"No API key: {len(no_api_key)}")

        # Show available tools
        if active:
            output.append("\n=== Available LLM Tools ===")
            output.append("- llm: Universal LLM tool (all providers)")
            output.append("- consensus: Query multiple models in parallel")

            # Only a fixed subset of providers gets a dedicated per-provider
            # tool; "google" is surfaced under the "gemini" tool name.
            provider_tools = []
            for provider in active:
                if provider in ["openai", "anthropic", "google", "groq", "mistral", "perplexity"]:
                    tool_name = "gemini" if provider == "google" else provider
                    provider_tools.append(tool_name)

            if provider_tools:
                output.append(f"- Provider tools: {', '.join(provider_tools)}")

        return "\n".join(output)
|
|
233
|
+
|
|
234
|
+
def _enable_provider(self, provider: Optional[str]) -> str:
|
|
235
|
+
"""Enable a provider."""
|
|
236
|
+
if not provider:
|
|
237
|
+
return "Error: provider is required for enable action"
|
|
238
|
+
|
|
239
|
+
if provider not in self.llm_tool.available_providers:
|
|
240
|
+
env_vars = LLMTool.API_KEY_ENV_VARS.get(provider, [])
|
|
241
|
+
if env_vars:
|
|
242
|
+
return f"Error: No API key found for {provider}. Set one of: {', '.join(env_vars)}"
|
|
243
|
+
else:
|
|
244
|
+
return f"Error: Unknown provider '{provider}'"
|
|
245
|
+
|
|
246
|
+
disabled = self.config.get("disabled_providers", [])
|
|
247
|
+
if provider in disabled:
|
|
248
|
+
disabled.remove(provider)
|
|
249
|
+
self.config["disabled_providers"] = disabled
|
|
250
|
+
self._save_config()
|
|
251
|
+
return f"Successfully enabled {provider}"
|
|
252
|
+
else:
|
|
253
|
+
return f"{provider} is already enabled"
|
|
254
|
+
|
|
255
|
+
def _disable_provider(self, provider: Optional[str]) -> str:
|
|
256
|
+
"""Disable a provider."""
|
|
257
|
+
if not provider:
|
|
258
|
+
return "Error: provider is required for disable action"
|
|
259
|
+
|
|
260
|
+
disabled = self.config.get("disabled_providers", [])
|
|
261
|
+
if provider not in disabled:
|
|
262
|
+
disabled.append(provider)
|
|
263
|
+
self.config["disabled_providers"] = disabled
|
|
264
|
+
self._save_config()
|
|
265
|
+
return f"Successfully disabled {provider}. Its tools will no longer be available."
|
|
266
|
+
else:
|
|
267
|
+
return f"{provider} is already disabled"
|
|
268
|
+
|
|
269
|
+
def _list_all_models(self, provider: Optional[str] = None) -> str:
|
|
270
|
+
"""List all available models from LiteLLM."""
|
|
271
|
+
try:
|
|
272
|
+
from hanzo_mcp.tools.llm.llm_tool import LLMTool
|
|
273
|
+
|
|
274
|
+
all_models = LLMTool.get_all_models()
|
|
275
|
+
|
|
276
|
+
if not all_models:
|
|
277
|
+
return "No models available or LiteLLM not installed"
|
|
278
|
+
|
|
279
|
+
output = ["=== Available LLM Models ==="]
|
|
280
|
+
|
|
281
|
+
if provider:
|
|
282
|
+
# Show models for specific provider
|
|
283
|
+
provider_lower = provider.lower()
|
|
284
|
+
models = all_models.get(provider_lower, [])
|
|
285
|
+
|
|
286
|
+
if not models:
|
|
287
|
+
return f"No models found for provider '{provider}'"
|
|
288
|
+
|
|
289
|
+
output.append(f"\n{provider.upper()} ({len(models)} models):")
|
|
290
|
+
output.append("-" * 40)
|
|
291
|
+
|
|
292
|
+
# Show first 50 models
|
|
293
|
+
for model in models[:50]:
|
|
294
|
+
output.append(f" {model}")
|
|
295
|
+
|
|
296
|
+
if len(models) > 50:
|
|
297
|
+
output.append(f" ... and {len(models) - 50} more")
|
|
298
|
+
else:
|
|
299
|
+
# Show summary of all providers
|
|
300
|
+
total_models = sum(len(models) for models in all_models.values())
|
|
301
|
+
output.append(f"Total models available: {total_models}")
|
|
302
|
+
output.append("")
|
|
303
|
+
|
|
304
|
+
# Show providers with counts
|
|
305
|
+
for provider_name, models in sorted(all_models.items()):
|
|
306
|
+
if models:
|
|
307
|
+
output.append(f"{provider_name}: {len(models)} models")
|
|
308
|
+
|
|
309
|
+
output.append("\nUse 'llm_manage --action models --provider <name>' to see models for a specific provider")
|
|
310
|
+
|
|
311
|
+
# Show recommended models
|
|
312
|
+
output.append("\n=== Recommended Models ===")
|
|
313
|
+
recommended = {
|
|
314
|
+
"OpenAI": ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo"],
|
|
315
|
+
"Anthropic": ["claude-3-opus-20240229", "claude-3-5-sonnet-20241022", "claude-3-haiku-20240307"],
|
|
316
|
+
"Google": ["gemini/gemini-1.5-pro", "gemini/gemini-1.5-flash"],
|
|
317
|
+
"Groq": ["groq/llama3-70b-8192", "groq/llama3-8b-8192", "groq/gemma2-9b-it"],
|
|
318
|
+
"Mistral": ["mistral/mistral-large-latest", "mistral/mistral-medium"],
|
|
319
|
+
}
|
|
320
|
+
|
|
321
|
+
for provider_name, models in recommended.items():
|
|
322
|
+
available = LLMTool().available_providers
|
|
323
|
+
provider_key = provider_name.lower()
|
|
324
|
+
|
|
325
|
+
if provider_key in available:
|
|
326
|
+
output.append(f"\n{provider_name} (✅ API key found):")
|
|
327
|
+
for model in models:
|
|
328
|
+
output.append(f" - {model}")
|
|
329
|
+
else:
|
|
330
|
+
output.append(f"\n{provider_name} (❌ No API key):")
|
|
331
|
+
for model in models:
|
|
332
|
+
output.append(f" - {model}")
|
|
333
|
+
|
|
334
|
+
return "\n".join(output)
|
|
335
|
+
|
|
336
|
+
except Exception as e:
|
|
337
|
+
return f"Error listing models: {str(e)}"
|
|
338
|
+
|
|
339
|
+
async def _test_model(self, ctx: MCPContext, provider: Optional[str], model: Optional[str]) -> str:
|
|
340
|
+
"""Test a model to verify it works."""
|
|
341
|
+
if not model and not provider:
|
|
342
|
+
return "Error: Either model or provider is required for test action"
|
|
343
|
+
|
|
344
|
+
# Determine model to test
|
|
345
|
+
if model:
|
|
346
|
+
test_model = model
|
|
347
|
+
else:
|
|
348
|
+
# Use default model for provider
|
|
349
|
+
default_models = {
|
|
350
|
+
"openai": "gpt-3.5-turbo",
|
|
351
|
+
"anthropic": "claude-3-haiku-20240307",
|
|
352
|
+
"google": "gemini/gemini-pro",
|
|
353
|
+
"groq": "groq/mixtral-8x7b-32768",
|
|
354
|
+
"mistral": "mistral/mistral-tiny",
|
|
355
|
+
"perplexity": "perplexity/sonar-small-online",
|
|
356
|
+
}
|
|
357
|
+
test_model = default_models.get(provider)
|
|
358
|
+
if not test_model:
|
|
359
|
+
return f"Error: No default model for provider '{provider}'. Please specify a model."
|
|
360
|
+
|
|
361
|
+
# Test the model
|
|
362
|
+
test_prompt = "Hello! Please respond with 'OK' if you can hear me."
|
|
363
|
+
|
|
364
|
+
output = [f"Testing model: {test_model}"]
|
|
365
|
+
output.append(f"Prompt: {test_prompt}")
|
|
366
|
+
output.append("")
|
|
367
|
+
|
|
368
|
+
try:
|
|
369
|
+
# Call the LLM
|
|
370
|
+
params = {
|
|
371
|
+
"model": test_model,
|
|
372
|
+
"prompt": test_prompt,
|
|
373
|
+
"max_tokens": 10,
|
|
374
|
+
"temperature": 0,
|
|
375
|
+
}
|
|
376
|
+
|
|
377
|
+
response = await self.llm_tool.call(ctx, **params)
|
|
378
|
+
|
|
379
|
+
if response.startswith("Error:"):
|
|
380
|
+
output.append("❌ Test failed:")
|
|
381
|
+
output.append(response)
|
|
382
|
+
else:
|
|
383
|
+
output.append("✅ Test successful!")
|
|
384
|
+
output.append(f"Response: {response}")
|
|
385
|
+
output.append("")
|
|
386
|
+
output.append(f"Model '{test_model}' is working correctly.")
|
|
387
|
+
|
|
388
|
+
# Show provider info
|
|
389
|
+
detected_provider = self.llm_tool._get_provider_for_model(test_model)
|
|
390
|
+
if detected_provider:
|
|
391
|
+
output.append(f"Provider: {detected_provider}")
|
|
392
|
+
|
|
393
|
+
except Exception as e:
|
|
394
|
+
output.append("❌ Test failed with exception:")
|
|
395
|
+
output.append(str(e))
|
|
396
|
+
|
|
397
|
+
return "\n".join(output)
|
|
398
|
+
|
|
399
|
+
def _get_example_models(self, provider: str) -> list[str]:
|
|
400
|
+
"""Get example models for a provider."""
|
|
401
|
+
examples = {
|
|
402
|
+
"openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo", "o1-preview"],
|
|
403
|
+
"anthropic": ["claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307"],
|
|
404
|
+
"google": ["gemini/gemini-pro", "gemini/gemini-1.5-pro", "gemini/gemini-1.5-flash"],
|
|
405
|
+
"groq": ["groq/mixtral-8x7b-32768", "groq/llama3-70b-8192", "groq/llama3-8b-8192"],
|
|
406
|
+
"mistral": ["mistral/mistral-large-latest", "mistral/mistral-medium", "mistral/mistral-small"],
|
|
407
|
+
"perplexity": ["perplexity/sonar-medium-online", "perplexity/sonar-small-online"],
|
|
408
|
+
}
|
|
409
|
+
return examples.get(provider, [])
|
|
410
|
+
|
|
411
|
+
    def register(self, mcp_server) -> None:
        """Register this tool with the MCP server.

        NOTE(review): intentionally a no-op — registration is presumably
        handled centrally (e.g. by the tools package) rather than per-tool;
        confirm against BaseTool's expected contract before relying on this.
        """
        pass
|