hanzo-mcp 0.5.0__py3-none-any.whl → 0.5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of hanzo-mcp might be problematic.

Files changed (60)
  1. hanzo_mcp/__init__.py +1 -1
  2. hanzo_mcp/config/settings.py +61 -0
  3. hanzo_mcp/tools/__init__.py +158 -12
  4. hanzo_mcp/tools/common/base.py +7 -2
  5. hanzo_mcp/tools/common/config_tool.py +396 -0
  6. hanzo_mcp/tools/common/stats.py +261 -0
  7. hanzo_mcp/tools/common/tool_disable.py +144 -0
  8. hanzo_mcp/tools/common/tool_enable.py +182 -0
  9. hanzo_mcp/tools/common/tool_list.py +263 -0
  10. hanzo_mcp/tools/database/__init__.py +71 -0
  11. hanzo_mcp/tools/database/database_manager.py +246 -0
  12. hanzo_mcp/tools/database/graph_add.py +257 -0
  13. hanzo_mcp/tools/database/graph_query.py +536 -0
  14. hanzo_mcp/tools/database/graph_remove.py +267 -0
  15. hanzo_mcp/tools/database/graph_search.py +348 -0
  16. hanzo_mcp/tools/database/graph_stats.py +345 -0
  17. hanzo_mcp/tools/database/sql_query.py +229 -0
  18. hanzo_mcp/tools/database/sql_search.py +296 -0
  19. hanzo_mcp/tools/database/sql_stats.py +254 -0
  20. hanzo_mcp/tools/editor/__init__.py +11 -0
  21. hanzo_mcp/tools/editor/neovim_command.py +272 -0
  22. hanzo_mcp/tools/editor/neovim_edit.py +290 -0
  23. hanzo_mcp/tools/editor/neovim_session.py +356 -0
  24. hanzo_mcp/tools/filesystem/__init__.py +20 -1
  25. hanzo_mcp/tools/filesystem/batch_search.py +812 -0
  26. hanzo_mcp/tools/filesystem/find_files.py +348 -0
  27. hanzo_mcp/tools/filesystem/git_search.py +505 -0
  28. hanzo_mcp/tools/llm/__init__.py +27 -0
  29. hanzo_mcp/tools/llm/consensus_tool.py +351 -0
  30. hanzo_mcp/tools/llm/llm_manage.py +413 -0
  31. hanzo_mcp/tools/llm/llm_tool.py +346 -0
  32. hanzo_mcp/tools/llm/provider_tools.py +412 -0
  33. hanzo_mcp/tools/mcp/__init__.py +11 -0
  34. hanzo_mcp/tools/mcp/mcp_add.py +263 -0
  35. hanzo_mcp/tools/mcp/mcp_remove.py +127 -0
  36. hanzo_mcp/tools/mcp/mcp_stats.py +165 -0
  37. hanzo_mcp/tools/shell/__init__.py +27 -7
  38. hanzo_mcp/tools/shell/logs.py +265 -0
  39. hanzo_mcp/tools/shell/npx.py +194 -0
  40. hanzo_mcp/tools/shell/npx_background.py +254 -0
  41. hanzo_mcp/tools/shell/pkill.py +262 -0
  42. hanzo_mcp/tools/shell/processes.py +279 -0
  43. hanzo_mcp/tools/shell/run_background.py +326 -0
  44. hanzo_mcp/tools/shell/uvx.py +187 -0
  45. hanzo_mcp/tools/shell/uvx_background.py +249 -0
  46. hanzo_mcp/tools/vector/__init__.py +21 -12
  47. hanzo_mcp/tools/vector/ast_analyzer.py +459 -0
  48. hanzo_mcp/tools/vector/git_ingester.py +485 -0
  49. hanzo_mcp/tools/vector/index_tool.py +358 -0
  50. hanzo_mcp/tools/vector/infinity_store.py +465 -1
  51. hanzo_mcp/tools/vector/mock_infinity.py +162 -0
  52. hanzo_mcp/tools/vector/vector_index.py +7 -6
  53. hanzo_mcp/tools/vector/vector_search.py +22 -7
  54. {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/METADATA +68 -20
  55. hanzo_mcp-0.5.2.dist-info/RECORD +106 -0
  56. hanzo_mcp-0.5.0.dist-info/RECORD +0 -63
  57. {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/WHEEL +0 -0
  58. {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/entry_points.txt +0 -0
  59. {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/licenses/LICENSE +0 -0
  60. {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/top_level.txt +0 -0
hanzo_mcp/tools/llm/provider_tools.py
@@ -0,0 +1,412 @@
+ """Provider-specific LLM tools."""
+
+ import os
+ from typing import Annotated, Optional, TypedDict, Unpack, final, override, Dict, Any
+
+ from fastmcp import Context as MCPContext
+ from pydantic import Field
+
+ from hanzo_mcp.tools.common.base import BaseTool
+ from hanzo_mcp.tools.common.context import create_tool_context
+ from hanzo_mcp.tools.llm.llm_tool import LLMTool
+
+
+ Prompt = Annotated[
+     str,
+     Field(
+         description="The prompt or question to send to the model",
+         min_length=1,
+     ),
+ ]
+
+ Model = Annotated[
+     Optional[str],
+     Field(
+         description="Specific model variant (defaults to provider's best model)",
+         default=None,
+     ),
+ ]
+
+ SystemPrompt = Annotated[
+     Optional[str],
+     Field(
+         description="System prompt to set context",
+         default=None,
+     ),
+ ]
+
+ Temperature = Annotated[
+     float,
+     Field(
+         description="Temperature for response randomness (0.0-2.0)",
+         default=0.7,
+     ),
+ ]
+
+ MaxTokens = Annotated[
+     Optional[int],
+     Field(
+         description="Maximum tokens in response",
+         default=None,
+     ),
+ ]
+
+ JsonMode = Annotated[
+     bool,
+     Field(
+         description="Request JSON formatted response",
+         default=False,
+     ),
+ ]
+
+
+ class ProviderToolParams(TypedDict, total=False):
+     """Parameters for provider-specific tools."""
+
+     prompt: str
+     model: Optional[str]
+     system_prompt: Optional[str]
+     temperature: float
+     max_tokens: Optional[int]
+     json_mode: bool
+
+
+ class BaseProviderTool(BaseTool):
+     """Base class for provider-specific LLM tools."""
+
+     def __init__(self, provider: str, default_model: str, model_variants: Dict[str, str]):
+         """Initialize provider tool.
+
+         Args:
+             provider: Provider name
+             default_model: Default model to use
+             model_variants: Map of short names to full model names
+         """
+         self.provider = provider
+         self.default_model = default_model
+         self.model_variants = model_variants
+         self.llm_tool = LLMTool()
+         self.is_available = provider in self.llm_tool.available_providers
+
+     def get_full_model_name(self, model: Optional[str]) -> str:
+         """Get full model name from short name or default."""
+         if not model:
+             return self.default_model
+
+         # Check if it's a short name
+         if model in self.model_variants:
+             return self.model_variants[model]
+
+         # Return as-is if not found (assume full name)
+         return model
+
+     @override
+     async def call(
+         self,
+         ctx: MCPContext,
+         **params: Unpack[ProviderToolParams],
+     ) -> str:
+         """Call the provider's LLM."""
+         if not self.is_available:
+             env_vars = LLMTool.API_KEY_ENV_VARS.get(self.provider, [])
+             return f"Error: {self.provider.title()} API key not found. Set one of: {', '.join(env_vars)}"
+
+         # Get full model name
+         model = self.get_full_model_name(params.get("model"))
+
+         # Prepare LLM tool parameters
+         llm_params = {
+             "model": model,
+             "prompt": params["prompt"],
+         }
+
+         # Add optional parameters
+         if "system_prompt" in params:
+             llm_params["system_prompt"] = params["system_prompt"]
+         if "temperature" in params:
+             llm_params["temperature"] = params["temperature"]
+         if "max_tokens" in params:
+             llm_params["max_tokens"] = params["max_tokens"]
+         if "json_mode" in params:
+             llm_params["json_mode"] = params["json_mode"]
+
+         # Call the LLM tool
+         return await self.llm_tool.call(ctx, **llm_params)
+
+     def register(self, mcp_server) -> None:
+         """Register this tool with the MCP server."""
+         pass
+
+
+ @final
+ class OpenAITool(BaseProviderTool):
+     """OpenAI-specific LLM tool."""
+
+     def __init__(self):
+         super().__init__(
+             provider="openai",
+             default_model="gpt-4o",
+             model_variants={
+                 "4o": "gpt-4o",
+                 "4": "gpt-4",
+                 "3.5": "gpt-3.5-turbo",
+                 "o1": "o1-preview",
+                 "o1-mini": "o1-mini",
+                 "4-turbo": "gpt-4-turbo-preview",
+                 "4-vision": "gpt-4-vision-preview",
+             }
+         )
+
+     @property
+     @override
+     def name(self) -> str:
+         return "openai"
+
+     @property
+     @override
+     def description(self) -> str:
+         status = "✓ Available" if self.is_available else "✗ No API key"
+         return f"""Query OpenAI models directly ({status}).
+
+ Models:
+ - 4o (default): GPT-4o - Latest and most capable
+ - 4: GPT-4 - Advanced reasoning
+ - 3.5: GPT-3.5 Turbo - Fast and efficient
+ - o1: O1 Preview - Chain of thought reasoning
+ - o1-mini: O1 Mini - Smaller reasoning model
+
+ Examples:
+ - openai --prompt "Explain quantum computing"
+ - openai --model 4 --prompt "Write a Python function"
+ - openai --model o1 --prompt "Solve this step by step"
+ """
+
+
+ @final
+ class AnthropicTool(BaseProviderTool):
+     """Anthropic-specific LLM tool."""
+
+     def __init__(self):
+         super().__init__(
+             provider="anthropic",
+             default_model="claude-3-sonnet-20240229",
+             model_variants={
+                 "opus": "claude-3-opus-20240229",
+                 "sonnet": "claude-3-sonnet-20240229",
+                 "haiku": "claude-3-haiku-20240307",
+                 "2.1": "claude-2.1",
+                 "2": "claude-2",
+                 "instant": "claude-instant-1.2",
+             }
+         )
+
+     @property
+     @override
+     def name(self) -> str:
+         return "anthropic"
+
+     @property
+     @override
+     def description(self) -> str:
+         status = "✓ Available" if self.is_available else "✗ No API key"
+         return f"""Query Anthropic Claude models directly ({status}).
+
+ Models:
+ - sonnet (default): Claude 3 Sonnet - Balanced performance
+ - opus: Claude 3 Opus - Most capable
+ - haiku: Claude 3 Haiku - Fast and efficient
+ - 2.1: Claude 2.1 - Previous generation
+ - instant: Claude Instant - Very fast
+
+ Examples:
+ - anthropic --prompt "Analyze this code"
+ - anthropic --model opus --prompt "Write a detailed essay"
+ - anthropic --model haiku --prompt "Quick question"
+ """
+
+
+ @final
+ class GeminiTool(BaseProviderTool):
+     """Google Gemini-specific LLM tool."""
+
+     def __init__(self):
+         super().__init__(
+             provider="google",
+             default_model="gemini/gemini-pro",
+             model_variants={
+                 "pro": "gemini/gemini-pro",
+                 "pro-vision": "gemini/gemini-pro-vision",
+                 "1.5-pro": "gemini/gemini-1.5-pro-latest",
+                 "1.5-flash": "gemini/gemini-1.5-flash-latest",
+                 "ultra": "gemini/gemini-ultra",
+             }
+         )
+
+     @property
+     @override
+     def name(self) -> str:
+         return "gemini"
+
+     @property
+     @override
+     def description(self) -> str:
+         status = "✓ Available" if self.is_available else "✗ No API key"
+         return f"""Query Google Gemini models directly ({status}).
+
+ Models:
+ - pro (default): Gemini Pro - Balanced model
+ - 1.5-pro: Gemini 1.5 Pro - Advanced with long context
+ - 1.5-flash: Gemini 1.5 Flash - Fast and efficient
+ - pro-vision: Gemini Pro Vision - Multimodal
+ - ultra: Gemini Ultra - Most capable (if available)
+
+ Examples:
+ - gemini --prompt "Explain this concept"
+ - gemini --model 1.5-pro --prompt "Analyze this long document"
+ - gemini --model 1.5-flash --prompt "Quick task"
+ """
+
+
+ @final
+ class GroqTool(BaseProviderTool):
+     """Groq-specific LLM tool."""
+
+     def __init__(self):
+         super().__init__(
+             provider="groq",
+             default_model="groq/mixtral-8x7b-32768",
+             model_variants={
+                 "mixtral": "groq/mixtral-8x7b-32768",
+                 "llama3-70b": "groq/llama3-70b-8192",
+                 "llama3-8b": "groq/llama3-8b-8192",
+                 "llama2-70b": "groq/llama2-70b-4096",
+                 "gemma-7b": "groq/gemma-7b-it",
+             }
+         )
+
+     @property
+     @override
+     def name(self) -> str:
+         return "groq"
+
+     @property
+     @override
+     def description(self) -> str:
+         status = "✓ Available" if self.is_available else "✗ No API key"
+         return f"""Query Groq LPU models - ultra-fast inference ({status}).
+
+ Models:
+ - mixtral (default): Mixtral 8x7B - High quality
+ - llama3-70b: Llama 3 70B - Very capable
+ - llama3-8b: Llama 3 8B - Fast and efficient
+ - llama2-70b: Llama 2 70B - Previous gen
+ - gemma-7b: Google Gemma 7B - Efficient
+
+ Examples:
+ - groq --prompt "Fast response needed"
+ - groq --model llama3-70b --prompt "Complex reasoning"
+ - groq --model gemma-7b --prompt "Quick task"
+ """
+
+
+ @final
+ class MistralTool(BaseProviderTool):
+     """Mistral-specific LLM tool."""
+
+     def __init__(self):
+         super().__init__(
+             provider="mistral",
+             default_model="mistral/mistral-medium",
+             model_variants={
+                 "tiny": "mistral/mistral-tiny",
+                 "small": "mistral/mistral-small-latest",
+                 "medium": "mistral/mistral-medium-latest",
+                 "large": "mistral/mistral-large-latest",
+                 "embed": "mistral/mistral-embed",
+             }
+         )
+
+     @property
+     @override
+     def name(self) -> str:
+         return "mistral"
+
+     @property
+     @override
+     def description(self) -> str:
+         status = "✓ Available" if self.is_available else "✗ No API key"
+         return f"""Query Mistral AI models directly ({status}).
+
+ Models:
+ - medium (default): Mistral Medium - Balanced
+ - large: Mistral Large - Most capable
+ - small: Mistral Small - Efficient
+ - tiny: Mistral Tiny - Very fast
+
+ Examples:
+ - mistral --prompt "Explain this"
+ - mistral --model large --prompt "Complex analysis"
+ - mistral --model tiny --prompt "Quick response"
+ """
+
+
+ @final
+ class PerplexityTool(BaseProviderTool):
+     """Perplexity-specific LLM tool."""
+
+     def __init__(self):
+         super().__init__(
+             provider="perplexity",
+             default_model="perplexity/sonar-medium-online",
+             model_variants={
+                 "sonar-small": "perplexity/sonar-small-online",
+                 "sonar-medium": "perplexity/sonar-medium-online",
+                 "sonar-small-chat": "perplexity/sonar-small-chat",
+                 "sonar-medium-chat": "perplexity/sonar-medium-chat",
+             }
+         )
+
+     @property
+     @override
+     def name(self) -> str:
+         return "perplexity"
+
+     @property
+     @override
+     def description(self) -> str:
+         status = "✓ Available" if self.is_available else "✗ No API key"
+         return f"""Query Perplexity models with internet access ({status}).
+
+ Models:
+ - sonar-medium (default): Online search + reasoning
+ - sonar-small: Faster online search
+ - sonar-medium-chat: Chat without search
+ - sonar-small-chat: Fast chat without search
+
+ Examples:
+ - perplexity --prompt "Latest news about AI"
+ - perplexity --model sonar-small --prompt "Quick fact check"
+ - perplexity --model sonar-medium-chat --prompt "Explain without search"
+ """
+
+
+ # Export all provider tools
+ PROVIDER_TOOLS = [
+     OpenAITool,
+     AnthropicTool,
+     GeminiTool,
+     GroqTool,
+     MistralTool,
+     PerplexityTool,
+ ]
+
+
+ def create_provider_tools() -> list[BaseTool]:
+     """Create instances of all provider tools."""
+     tools = []
+     for tool_class in PROVIDER_TOOLS:
+         tool = tool_class()
+         # Only include tools with available API keys
+         if tool.is_available:
+             tools.append(tool)
+     return tools
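
For illustration only (this snippet is not part of the diff), the alias resolution and availability filtering above can be exercised directly. It assumes OpenAITool() can be constructed without an API key configured, which the code above supports since __init__ only records availability rather than failing:

    from hanzo_mcp.tools.llm.provider_tools import OpenAITool, create_provider_tools

    tool = OpenAITool()
    print(tool.get_full_model_name(None))  # "gpt-4o" -- falls back to the provider default
    print(tool.get_full_model_name("o1"))  # "o1-preview" -- resolved via model_variants
    print(tool.get_full_model_name("my-custom-model"))  # unknown names pass through unchanged

    # create_provider_tools() keeps only providers whose API keys were detected:
    for t in create_provider_tools():
        print(f"{t.name}: {t.default_model}")
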
hanzo_mcp/tools/mcp/__init__.py
@@ -0,0 +1,11 @@
+ """MCP management tools."""
+
+ from hanzo_mcp.tools.mcp.mcp_add import McpAddTool
+ from hanzo_mcp.tools.mcp.mcp_remove import McpRemoveTool
+ from hanzo_mcp.tools.mcp.mcp_stats import McpStatsTool
+
+ __all__ = [
+     "McpAddTool",
+     "McpRemoveTool",
+     "McpStatsTool",
+ ]
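
Because these names are re-exported at the package level, downstream code can import all three management tools from one place (shown only as an illustration):

    from hanzo_mcp.tools.mcp import McpAddTool, McpRemoveTool, McpStatsTool
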
hanzo_mcp/tools/mcp/mcp_add.py
@@ -0,0 +1,263 @@
+ """Add MCP servers dynamically."""
+
+ import json
+ import subprocess
+ import shutil
+ from typing import Annotated, Optional, TypedDict, Unpack, final, override, Dict, Any
+ from pathlib import Path
+
+ from fastmcp import Context as MCPContext
+ from pydantic import Field
+
+ from hanzo_mcp.tools.common.base import BaseTool
+ from hanzo_mcp.tools.common.context import create_tool_context
+
+
+ ServerCommand = Annotated[
+     str,
+     Field(
+         description="Server command (e.g., 'uvx mcp-server-git', 'npx @modelcontextprotocol/server-filesystem')",
+         min_length=1,
+     ),
+ ]
+
+ ServerName = Annotated[
+     str,
+     Field(
+         description="Unique name for the server",
+         min_length=1,
+     ),
+ ]
+
+ Args = Annotated[
+     Optional[str],
+     Field(
+         description="Additional arguments for the server",
+         default=None,
+     ),
+ ]
+
+ Env = Annotated[
+     Optional[Dict[str, str]],
+     Field(
+         description="Environment variables for the server",
+         default=None,
+     ),
+ ]
+
+ AutoStart = Annotated[
+     bool,
+     Field(
+         description="Automatically start the server after adding",
+         default=True,
+     ),
+ ]
+
+
+ class McpAddParams(TypedDict, total=False):
+     """Parameters for MCP add tool."""
+
+     command: str
+     name: str
+     args: Optional[str]
+     env: Optional[Dict[str, str]]
+     auto_start: bool
+
+
+ @final
+ class McpAddTool(BaseTool):
+     """Tool for adding MCP servers dynamically."""
+
+     # Class variable to store added servers
+     _mcp_servers: Dict[str, Dict[str, Any]] = {}
+     _config_file = Path.home() / ".hanzo" / "mcp" / "servers.json"
+
+     def __init__(self):
+         """Initialize the MCP add tool."""
+         # Load existing servers from config
+         self._load_servers()
+
+     @classmethod
+     def _load_servers(cls):
+         """Load servers from config file."""
+         if cls._config_file.exists():
+             try:
+                 with open(cls._config_file, 'r') as f:
+                     cls._mcp_servers = json.load(f)
+             except Exception:
+                 cls._mcp_servers = {}
+
+     @classmethod
+     def _save_servers(cls):
+         """Save servers to config file."""
+         cls._config_file.parent.mkdir(parents=True, exist_ok=True)
+         with open(cls._config_file, 'w') as f:
+             json.dump(cls._mcp_servers, f, indent=2)
+
+     @classmethod
+     def get_servers(cls) -> Dict[str, Dict[str, Any]]:
+         """Get all registered MCP servers."""
+         return cls._mcp_servers.copy()
+
+     @property
+     @override
+     def name(self) -> str:
+         """Get the tool name."""
+         return "mcp_add"
+
+     @property
+     @override
+     def description(self) -> str:
+         """Get the tool description."""
+         return """Add MCP (Model Context Protocol) servers dynamically.
+
+ This allows adding new MCP servers that provide additional tools.
+ Servers can be from npm packages or Python packages.
+
+ Common MCP servers:
+ - @modelcontextprotocol/server-filesystem - File system access
+ - @modelcontextprotocol/server-github - GitHub integration
+ - @modelcontextprotocol/server-gitlab - GitLab integration
+ - @modelcontextprotocol/server-postgres - PostgreSQL access
+ - @modelcontextprotocol/server-sqlite - SQLite access
+ - mcp-server-git - Git operations
+ - mcp-server-docker - Docker management
+ - mcp-server-kubernetes - K8s management
+
+ Examples:
+ - mcp_add --command "npx @modelcontextprotocol/server-filesystem" --name filesystem --args "/path/to/allow"
+ - mcp_add --command "uvx mcp-server-git" --name git --args "--repository /path/to/repo"
+ - mcp_add --command "npx @modelcontextprotocol/server-github" --name github --env '{"GITHUB_TOKEN": "..."}'
+
+ Use 'mcp_stats' to see all added servers and their status.
+ """
+
+     @override
+     async def call(
+         self,
+         ctx: MCPContext,
+         **params: Unpack[McpAddParams],
+     ) -> str:
+         """Add an MCP server.
+
+         Args:
+             ctx: MCP context
+             **params: Tool parameters
+
+         Returns:
+             Result of adding the server
+         """
+         tool_ctx = create_tool_context(ctx)
+         await tool_ctx.set_tool_info(self.name)
+
+         # Extract parameters
+         command = params.get("command")
+         if not command:
+             return "Error: command is required"
+
+         name = params.get("name")
+         if not name:
+             return "Error: name is required"
+
+         args = params.get("args")
+         env = params.get("env", {})
+         auto_start = params.get("auto_start", True)
+
+         # Check if server already exists
+         if name in self._mcp_servers:
+             return f"Error: Server '{name}' already exists. Use mcp_remove to remove it first."
+
+         # Parse command to determine type
+         server_type = "unknown"
+         if command.startswith("npx"):
+             server_type = "node"
+         elif command.startswith("uvx"):
+             server_type = "python"
+         elif command.startswith("python"):
+             server_type = "python"
+         elif command.startswith("node"):
+             server_type = "node"
+
+         # Build full command
+         full_command = [command]
+         if args:
+             import shlex
+             # If command contains spaces, split it first
+             if ' ' in command:
+                 full_command = shlex.split(command)
+             full_command.extend(shlex.split(args))
+         else:
+             if ' ' in command:
+                 import shlex
+                 full_command = shlex.split(command)
+
+         await tool_ctx.info(f"Adding MCP server '{name}' with command: {' '.join(full_command)}")
+
+         # Create server configuration
+         server_config = {
+             "command": full_command,
+             "name": name,
+             "type": server_type,
+             "env": env,
+             "status": "stopped",
+             "process_id": None,
+             "tools": [],
+             "resources": [],
+             "prompts": []
+         }
+
+         # Test if command is valid
+         if auto_start:
+             try:
+                 # Try to start the server briefly to validate
+                 test_env = {**env} if env else {}
+
+                 # Quick test to see if command exists
+                 test_cmd = full_command[0]
+                 if test_cmd == "npx":
+                     if not shutil.which("npx"):
+                         return "Error: npx not found. Install Node.js first."
+                 elif test_cmd == "uvx":
+                     if not shutil.which("uvx"):
+                         return "Error: uvx not found. Install uv first."
+
+                 # TODO: Actually start and connect to the MCP server
+                 # For now, we just store the configuration
+                 server_config["status"] = "ready"
+
+             except Exception as e:
+                 await tool_ctx.error(f"Failed to validate server: {str(e)}")
+                 server_config["status"] = "error"
+                 server_config["error"] = str(e)
+
+         # Add server to registry
+         self._mcp_servers[name] = server_config
+         self._save_servers()
+
+         output = [
+             f"Successfully added MCP server '{name}':",
+             f" Type: {server_type}",
+             f" Command: {' '.join(full_command)}",
+             f" Status: {server_config['status']}",
+         ]
+
+         if env:
+             output.append(f" Environment: {list(env.keys())}")
+
+         output.extend([
+             "",
+             "Use 'mcp_stats' to see server details.",
+             f"Use 'mcp_remove --name {name}' to remove this server."
+         ])
+
+         # Note: In a real implementation, we would:
+         # 1. Start the MCP server process
+         # 2. Connect to it via stdio/HTTP
+         # 3. Query its capabilities (tools, resources, prompts)
+         # 4. Register those with our MCP server
+
+         return "\n".join(output)
+
+     def register(self, mcp_server) -> None:
+         """Register this tool with the MCP server."""
+         pass
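
As a closing illustration (again, not code from the wheel), the registry that mcp_add persists to ~/.hanzo/mcp/servers.json can be read back through the classmethods defined above; only the printing loop here is hypothetical:

    from hanzo_mcp.tools.mcp.mcp_add import McpAddTool

    McpAddTool._load_servers()  # repopulates the class registry from ~/.hanzo/mcp/servers.json
    for name, config in McpAddTool.get_servers().items():
        # Each entry carries the fields built in McpAddTool.call(): command, name,
        # type, env, status, process_id, tools, resources, prompts.
        print(f"{name} [{config['type']}] {config['status']}: {' '.join(config['command'])}")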