hanzo-mcp 0.8.2__py3-none-any.whl → 0.8.3__py3-none-any.whl
This diff shows the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of hanzo-mcp has been flagged as potentially problematic.
- hanzo_mcp/__init__.py +14 -1
- hanzo_mcp/bridge.py +133 -127
- hanzo_mcp/compute_nodes.py +68 -55
- hanzo_mcp/config/settings.py +11 -0
- hanzo_mcp/core/base_agent.py +521 -0
- hanzo_mcp/core/model_registry.py +436 -0
- hanzo_mcp/dev_server.py +3 -2
- hanzo_mcp/server.py +4 -1
- hanzo_mcp/tools/__init__.py +61 -46
- hanzo_mcp/tools/agent/__init__.py +19 -35
- hanzo_mcp/tools/agent/cli_tools.py +544 -0
- hanzo_mcp/tools/agent/unified_cli_tools.py +259 -0
- hanzo_mcp/tools/common/batch_tool.py +2 -0
- hanzo_mcp/tools/common/context.py +3 -1
- hanzo_mcp/tools/config/config_tool.py +121 -9
- hanzo_mcp/tools/filesystem/__init__.py +18 -0
- hanzo_mcp/tools/llm/__init__.py +44 -16
- hanzo_mcp/tools/llm/llm_tool.py +13 -0
- hanzo_mcp/tools/llm/llm_unified.py +911 -0
- hanzo_mcp/tools/shell/auto_background.py +24 -0
- {hanzo_mcp-0.8.2.dist-info → hanzo_mcp-0.8.3.dist-info}/METADATA +1 -1
- {hanzo_mcp-0.8.2.dist-info → hanzo_mcp-0.8.3.dist-info}/RECORD +25 -20
- {hanzo_mcp-0.8.2.dist-info → hanzo_mcp-0.8.3.dist-info}/WHEEL +0 -0
- {hanzo_mcp-0.8.2.dist-info → hanzo_mcp-0.8.3.dist-info}/entry_points.txt +0 -0
- {hanzo_mcp-0.8.2.dist-info → hanzo_mcp-0.8.3.dist-info}/top_level.txt +0 -0
hanzo_mcp/core/model_registry.py
ADDED
@@ -0,0 +1,436 @@
+"""Unified Model Registry - Single source of truth for all AI model mappings.
+
+This module provides a centralized registry for AI model configurations,
+eliminating duplication and ensuring consistency across the codebase.
+Thread-safe singleton implementation.
+"""
+
+from __future__ import annotations
+
+import threading
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional, Set, Any
+from enum import Enum
+
+
+class ModelProvider(Enum):
+    """Enumeration of AI model providers."""
+
+    ANTHROPIC = "anthropic"
+    OPENAI = "openai"
+    GOOGLE = "google"
+    XAI = "xai"
+    OLLAMA = "ollama"
+    DEEPSEEK = "deepseek"
+    MISTRAL = "mistral"
+    META = "meta"
+    HANZO = "hanzo"
+
+
+@dataclass(frozen=True)
+class ModelConfig:
+    """Configuration for a single AI model."""
+
+    full_name: str
+    provider: ModelProvider
+    aliases: Set[str] = field(default_factory=set)
+    default_params: Dict[str, Any] = field(default_factory=dict)
+    supports_vision: bool = False
+    supports_tools: bool = False
+    supports_streaming: bool = True
+    context_window: int = 8192
+    max_output: int = 4096
+    api_key_env: Optional[str] = None
+    cli_command: Optional[str] = None
+
+
+class ModelRegistry:
+    """Centralized registry for all AI models.
+
+    Thread-safe singleton implementation ensuring single source of truth
+    for model configurations across the codebase.
+    """
+
+    _instance: Optional[ModelRegistry] = None
+    _lock = threading.Lock()
+    _models: Dict[str, ModelConfig] = {}
+    _initialized = False
+
+    def __new__(cls) -> ModelRegistry:
+        """Thread-safe singleton pattern."""
+        if cls._instance is None:
+            with cls._lock:
+                if cls._instance is None:
+                    cls._instance = super().__new__(cls)
+        return cls._instance
+
+    def __init__(self) -> None:
+        """Initialize model registry once."""
+        if not self._initialized:
+            with self._lock:
+                if not self._initialized:
+                    self._initialize_models()
+                    self._initialized = True
+
+    def _initialize_models(self) -> None:
+        """Initialize all model configurations."""
+        # Claude models
+        self._register(ModelConfig(
+            full_name="claude-3-5-sonnet-20241022",
+            provider=ModelProvider.ANTHROPIC,
+            aliases={"claude", "cc", "claude-code", "sonnet", "sonnet-4.1"},
+            supports_vision=True,
+            supports_tools=True,
+            context_window=200000,
+            max_output=8192,
+            api_key_env="ANTHROPIC_API_KEY",
+            cli_command="claude",
+        ))
+
+        self._register(ModelConfig(
+            full_name="claude-opus-4-1-20250805",
+            provider=ModelProvider.ANTHROPIC,
+            aliases={"opus", "opus-4.1", "claude-opus"},
+            supports_vision=True,
+            supports_tools=True,
+            context_window=200000,
+            max_output=8192,
+            api_key_env="ANTHROPIC_API_KEY",
+            cli_command="claude",
+        ))
+
+        self._register(ModelConfig(
+            full_name="claude-3-haiku-20240307",
+            provider=ModelProvider.ANTHROPIC,
+            aliases={"haiku", "claude-haiku"},
+            supports_vision=True,
+            supports_tools=True,
+            context_window=200000,
+            max_output=4096,
+            api_key_env="ANTHROPIC_API_KEY",
+            cli_command="claude",
+        ))
+
+        # OpenAI models
+        self._register(ModelConfig(
+            full_name="gpt-4-turbo",
+            provider=ModelProvider.OPENAI,
+            aliases={"gpt4", "gpt-4", "codex"},
+            supports_vision=True,
+            supports_tools=True,
+            context_window=128000,
+            max_output=4096,
+            api_key_env="OPENAI_API_KEY",
+            cli_command="openai",
+        ))
+
+        self._register(ModelConfig(
+            full_name="gpt-5-turbo",
+            provider=ModelProvider.OPENAI,
+            aliases={"gpt5", "gpt-5"},
+            supports_vision=True,
+            supports_tools=True,
+            context_window=256000,
+            max_output=16384,
+            api_key_env="OPENAI_API_KEY",
+            cli_command="openai",
+        ))
+
+        self._register(ModelConfig(
+            full_name="o1-preview",
+            provider=ModelProvider.OPENAI,
+            aliases={"o1", "openai-o1"},
+            supports_vision=False,
+            supports_tools=False,
+            context_window=128000,
+            max_output=32768,
+            api_key_env="OPENAI_API_KEY",
+            cli_command="openai",
+        ))
+
+        # Google models
+        self._register(ModelConfig(
+            full_name="gemini-2.0-flash-exp",
+            provider=ModelProvider.GOOGLE,
+            aliases={"gemini-2", "gemini-2.0", "gemini2"},
+            supports_vision=True,
+            supports_tools=True,
+            context_window=1000000,
+            max_output=8192,
+            api_key_env="GEMINI_API_KEY",
+            cli_command="gemini",
+        ))
+
+        self._register(ModelConfig(
+            full_name="gemini-exp-1206",
+            provider=ModelProvider.GOOGLE,
+            aliases={"gemini-2.5", "gemini-2.5-pro", "gemini-pro-2.5"},
+            supports_vision=True,
+            supports_tools=True,
+            context_window=2000000,
+            max_output=8192,
+            api_key_env="GEMINI_API_KEY",
+            cli_command="gemini",
+        ))
+
+        self._register(ModelConfig(
+            full_name="gemini-1.5-pro",
+            provider=ModelProvider.GOOGLE,
+            aliases={"gemini", "gemini-pro", "gemini-1.5"},
+            supports_vision=True,
+            supports_tools=True,
+            context_window=2000000,
+            max_output=8192,
+            api_key_env="GEMINI_API_KEY",
+            cli_command="gemini",
+        ))
+
+        self._register(ModelConfig(
+            full_name="gemini-1.5-flash",
+            provider=ModelProvider.GOOGLE,
+            aliases={"gemini-flash", "flash"},
+            supports_vision=True,
+            supports_tools=True,
+            context_window=1000000,
+            max_output=8192,
+            api_key_env="GEMINI_API_KEY",
+            cli_command="gemini",
+        ))
+
+        # xAI models
+        self._register(ModelConfig(
+            full_name="grok-4",
+            provider=ModelProvider.XAI,
+            aliases={"grok", "xai-grok", "grok-2"},  # grok-2 for backward compat
+            supports_vision=True,  # Grok-4 supports multimodal
+            supports_tools=True,
+            context_window=128000,
+            max_output=8192,
+            api_key_env="XAI_API_KEY",
+            cli_command="grok",
+        ))
+
+        # Ollama models
+        self._register(ModelConfig(
+            full_name="ollama/llama-3.2-3b",
+            provider=ModelProvider.OLLAMA,
+            aliases={"llama", "llama-3.2", "llama3"},
+            supports_vision=False,
+            supports_tools=False,
+            context_window=128000,
+            max_output=4096,
+            api_key_env=None,  # Local model
+            cli_command="ollama",
+        ))
+
+        self._register(ModelConfig(
+            full_name="ollama/mistral:7b",
+            provider=ModelProvider.MISTRAL,
+            aliases={"mistral", "mistral-7b"},
+            supports_vision=False,
+            supports_tools=False,
+            context_window=32000,
+            max_output=4096,
+            api_key_env=None,  # Local model
+            cli_command="ollama",
+        ))
+
+        # DeepSeek models
+        self._register(ModelConfig(
+            full_name="deepseek-coder-v2",
+            provider=ModelProvider.DEEPSEEK,
+            aliases={"deepseek", "deepseek-coder"},
+            supports_vision=False,
+            supports_tools=True,
+            context_window=128000,
+            max_output=8192,
+            api_key_env="DEEPSEEK_API_KEY",
+            cli_command="deepseek",
+        ))
+
+    def _register(self, config: ModelConfig) -> None:
+        """Register a model configuration.
+
+        Args:
+            config: Model configuration to register
+        """
+        # Register by full name
+        self._models[config.full_name] = config
+
+        # Register all aliases
+        for alias in config.aliases:
+            self._models[alias.lower()] = config
+
+    def get(self, model_name: str) -> Optional[ModelConfig]:
+        """Get model configuration by name or alias.
+
+        Args:
+            model_name: Model name or alias
+
+        Returns:
+            Model configuration or None if not found
+        """
+        return self._models.get(model_name.lower())
+
+    def resolve(self, model_name: str) -> str:
+        """Resolve model name or alias to full model name.
+
+        Args:
+            model_name: Model name or alias
+
+        Returns:
+            Full model name, or original if not found
+        """
+        config = self.get(model_name)
+        return config.full_name if config else model_name
+
+    def get_by_provider(self, provider: ModelProvider) -> List[ModelConfig]:
+        """Get all unique models for a specific provider.
+
+        Args:
+            provider: Model provider
+
+        Returns:
+            List of unique model configurations
+        """
+        seen_names = set()
+        results = []
+        for config in self._models.values():
+            if config.provider == provider and config.full_name not in seen_names:
+                seen_names.add(config.full_name)
+                results.append(config)
+        return results
+
+    def get_models_supporting(
+        self,
+        vision: Optional[bool] = None,
+        tools: Optional[bool] = None,
+        streaming: Optional[bool] = None,
+    ) -> List[ModelConfig]:
+        """Get unique models supporting specific features.
+
+        Args:
+            vision: Filter by vision support
+            tools: Filter by tool support
+            streaming: Filter by streaming support
+
+        Returns:
+            List of unique matching model configurations
+        """
+        seen_names = set()
+        results = []
+
+        for config in self._models.values():
+            if config.full_name in seen_names:
+                continue
+
+            if vision is not None and config.supports_vision != vision:
+                continue
+            if tools is not None and config.supports_tools != tools:
+                continue
+            if streaming is not None and config.supports_streaming != streaming:
+                continue
+
+            seen_names.add(config.full_name)
+            results.append(config)
+
+        return results
+
+    def get_api_key_env(self, model_name: str) -> Optional[str]:
+        """Get the API key environment variable for a model.
+
+        Args:
+            model_name: Model name or alias
+
+        Returns:
+            Environment variable name or None
+        """
+        config = self.get(model_name)
+        return config.api_key_env if config else None
+
+    def get_cli_command(self, model_name: str) -> Optional[str]:
+        """Get the CLI command for a model.
+
+        Args:
+            model_name: Model name or alias
+
+        Returns:
+            CLI command or None
+        """
+        config = self.get(model_name)
+        return config.cli_command if config else None
+
+    def list_all_models(self) -> List[str]:
+        """List all unique model full names.
+
+        Returns:
+            Sorted list of unique full model names
+        """
+        seen_names = set()
+        for config in self._models.values():
+            seen_names.add(config.full_name)
+        return sorted(list(seen_names))
+
+    def list_all_aliases(self) -> Dict[str, str]:
+        """List all aliases and their full names.
+
+        Returns:
+            Dictionary mapping aliases to full names
+        """
+        result = {}
+        for key, config in self._models.items():
+            if key != config.full_name:
+                result[key] = config.full_name
+        return result
+
+
+# Global singleton instance
+registry = ModelRegistry()
+
+
+# Convenience functions
+def resolve_model(model_name: str) -> str:
+    """Resolve model name or alias to full model name.
+
+    Args:
+        model_name: Model name or alias
+
+    Returns:
+        Full model name
+    """
+    return registry.resolve(model_name)
+
+
+def get_model_config(model_name: str) -> Optional[ModelConfig]:
+    """Get model configuration.
+
+    Args:
+        model_name: Model name or alias
+
+    Returns:
+        Model configuration or None
+    """
+    return registry.get(model_name)
+
+
+def get_api_key_env(model_name: str) -> Optional[str]:
+    """Get API key environment variable for model.
+
+    Args:
+        model_name: Model name or alias
+
+    Returns:
+        Environment variable name or None
+    """
+    return registry.get_api_key_env(model_name)
+
+
+__all__ = [
+    "ModelProvider",
+    "ModelConfig",
+    "ModelRegistry",
+    "registry",
+    "resolve_model",
+    "get_model_config",
+    "get_api_key_env",
+]
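
The new module above is consumed mostly through its lookup helpers. A minimal usage sketch, using only names and registrations visible in this diff (nothing outside hanzo_mcp.core.model_registry is assumed):

from hanzo_mcp.core.model_registry import registry, resolve_model, get_model_config

# Aliases registered above resolve to canonical model names.
assert resolve_model("sonnet") == "claude-3-5-sonnet-20241022"
assert resolve_model("gpt5") == "gpt-5-turbo"

# Unknown names pass through unchanged instead of raising.
assert resolve_model("some-unknown-model") == "some-unknown-model"

# Full configuration lookup works by alias or full name.
opus = get_model_config("opus")
if opus is not None:
    print(opus.full_name, opus.context_window, opus.api_key_env)

# Feature filters return one entry per unique model.
vision_and_tools = registry.get_models_supporting(vision=True, tools=True)
print(sorted(m.full_name for m in vision_and_tools))
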
hanzo_mcp/dev_server.py
CHANGED
@@ -172,17 +172,18 @@ class DevServer:
 
         logger = logging.getLogger(__name__)
         logger.info(f"\n🚀 Starting Hanzo AI in development mode...")
-
+
         # Show compute nodes
         try:
             from hanzo_mcp.compute_nodes import ComputeNodeDetector
+
            detector = ComputeNodeDetector()
            summary = detector.get_node_summary()
            logger.info(f"🖥️ {summary}")
         except Exception:
             # Silently ignore if compute node detection fails
             pass
-
+
         logger.info(f"🔧 Hot reload enabled - watching for file changes")
         logger.info(f"📁 Project: {self.project_dir or 'current directory'}")
         logger.info(f"🌐 Transport: {transport}\n")
hanzo_mcp/server.py
CHANGED
@@ -9,7 +9,9 @@ import threading
 from typing import Literal, cast, final
 
 # Suppress litellm deprecation warnings about event loop
-warnings.filterwarnings(
+warnings.filterwarnings(
+    "ignore", message="There is no current event loop", category=DeprecationWarning
+)
 
 try:
     from fastmcp import FastMCP
@@ -224,6 +226,7 @@ class HanzoMCPServer:
         if transport != "stdio" and not os.environ.get("HANZO_QUIET"):
             try:
                 from hanzo_mcp.compute_nodes import ComputeNodeDetector
+
                 detector = ComputeNodeDetector()
                 summary = detector.get_node_summary()
                 logger = logging.getLogger(__name__)
hanzo_mcp/tools/__init__.py
CHANGED
@@ -1,55 +1,70 @@
 """Tools package for Hanzo AI.
 
-This package contains all
-
+This package contains all tools for the Hanzo MCP server. To keep imports
+robust across environments (e.g., older Python during CI), heavy imports are
+guarded. Submodules can still be imported directly, e.g.:
 
-
-improved performance for complex tool-based interactions when Claude has a dedicated
-space for structured thinking. It also includes an "agent" tool that enables Claude
-to delegate tasks to sub-agents for concurrent execution and specialized processing.
+    from hanzo_mcp.tools.llm.llm_tool import LLMTool
 """
 
-
-
-from hanzo_mcp.tools.todo import register_todo_tools
-from hanzo_mcp.tools.agent import register_agent_tools
-from hanzo_mcp.tools.shell import register_shell_tools
-from hanzo_mcp.tools.common import (
-    register_batch_tool,
-    register_critic_tool,
-    register_thinking_tool,
-)
-from hanzo_mcp.tools.vector import register_vector_tools
-from hanzo_mcp.tools.jupyter import register_jupyter_tools
-from hanzo_mcp.tools.database import DatabaseManager, register_database_tools
-from hanzo_mcp.tools.filesystem import register_filesystem_tools
-from hanzo_mcp.tools.common.base import BaseTool
-from hanzo_mcp.tools.common.stats import StatsTool
-from hanzo_mcp.tools.common.tool_list import ToolListTool
-from hanzo_mcp.tools.common.permissions import PermissionManager
-from hanzo_mcp.tools.common.tool_enable import ToolEnableTool
-from hanzo_mcp.tools.common.tool_disable import ToolDisableTool
-
-# Try to import memory tools, but don't fail if hanzo-memory is not installed
-try:
-    from hanzo_mcp.tools.memory import register_memory_tools
+# Defer annotation evaluation to avoid import-time NameErrors in constrained envs
+from __future__ import annotations
 
-
-
-
-
-
-
-
-
-
-
-from hanzo_mcp.tools.
-
-
-
-
-
+# NOTE: Keep top-level imports resilient for environments without optional deps
+try:  # pragma: no cover - import guards for CI environments
+    from mcp.server import FastMCP  # type: ignore
+except Exception:  # pragma: no cover
+
+    class FastMCP:  # type: ignore
+        pass
+
+
+try:  # pragma: no cover
+    from hanzo_mcp.tools.llm import (
+        LLMTool,
+        ConsensusTool,
+        LLMManageTool,
+        create_provider_tools,
+    )
+    from hanzo_mcp.tools.mcp import (
+        MCPTool,
+        McpAddTool,
+        McpStatsTool,
+        McpRemoveTool,
+    )
+    from hanzo_mcp.tools.todo import register_todo_tools
+    from hanzo_mcp.tools.agent import register_agent_tools
+    from hanzo_mcp.tools.shell import register_shell_tools
+    from hanzo_mcp.tools.editor import (
+        NeovimEditTool,
+        NeovimCommandTool,
+        NeovimSessionTool,
+    )
+    from hanzo_mcp.tools.vector import register_vector_tools
+    from hanzo_mcp.tools.jupyter import register_jupyter_tools
+    from hanzo_mcp.tools.database import DatabaseManager, register_database_tools
+    from hanzo_mcp.tools.filesystem import register_filesystem_tools
+    from hanzo_mcp.tools.common.base import BaseTool
+    from hanzo_mcp.tools.common.mode import activate_mode_from_env
+    from hanzo_mcp.tools.common.stats import StatsTool
+    from hanzo_mcp.tools.common.tool_list import ToolListTool
+    from hanzo_mcp.tools.config.mode_tool import mode_tool
+    from hanzo_mcp.tools.common.mode_loader import ModeLoader
+    from hanzo_mcp.tools.common.permissions import PermissionManager
+    from hanzo_mcp.tools.common.tool_enable import ToolEnableTool
+    from hanzo_mcp.tools.common.tool_disable import ToolDisableTool
+
+    # Try memory tools
+    try:
+        from hanzo_mcp.tools.memory import register_memory_tools
+
+        MEMORY_TOOLS_AVAILABLE = True
+    except Exception:
+        MEMORY_TOOLS_AVAILABLE = False
+        register_memory_tools = None  # type: ignore
+except Exception:
+    # Minimal surface to allow submodule imports elsewhere
+    pass
 
 # Try to import LSP tool
 try:
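
The new package docstring and the guarded imports above follow one idiom: optional or heavy dependencies are imported inside try/except so that importing hanzo_mcp.tools never hard-fails, while submodules stay importable directly. A generic sketch of that idiom, with the dependency and flag names chosen here purely for illustration (they are not part of hanzo-mcp):

# Illustrative only: "some_optional_dep" and HEAVY_TOOL_AVAILABLE are made-up names.
try:  # optional dependency may be missing in constrained environments
    from some_optional_dep import HeavyTool
    HEAVY_TOOL_AVAILABLE = True
except Exception:
    HeavyTool = None  # type: ignore[assignment]
    HEAVY_TOOL_AVAILABLE = False


def get_heavy_tool():
    """Return the optional tool, or fail with a clear message if it is absent."""
    if not HEAVY_TOOL_AVAILABLE:
        raise RuntimeError("some_optional_dep is not installed")
    return HeavyTool()
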
hanzo_mcp/tools/agent/__init__.py
CHANGED
@@ -13,11 +13,21 @@ from hanzo_mcp.tools.agent.agent_tool import AgentTool
 from hanzo_mcp.tools.agent.swarm_tool import SwarmTool
 from hanzo_mcp.tools.agent.network_tool import NetworkTool
 from hanzo_mcp.tools.common.permissions import PermissionManager
-
+# Import unified CLI tools (single source of truth)
+from hanzo_mcp.tools.agent.cli_tools import (
+    ClaudeCLITool,
+    ClaudeCodeCLITool,  # cc alias
+    CodexCLITool,
+    GeminiCLITool,
+    GrokCLITool,
+    OpenHandsCLITool,
+    OpenHandsShortCLITool,  # oh alias
+    HanzoDevCLITool,
+    ClineCLITool,
+    AiderCLITool,
+    register_cli_tools,
+)
 from hanzo_mcp.tools.agent.code_auth_tool import CodeAuthTool
-from hanzo_mcp.tools.agent.codex_cli_tool import CodexCLITool
-from hanzo_mcp.tools.agent.claude_cli_tool import ClaudeCLITool
-from hanzo_mcp.tools.agent.gemini_cli_tool import GeminiCLITool
 
 
 def register_agent_tools(
@@ -68,27 +78,6 @@ def register_agent_tools(
         agent_max_tool_uses=agent_max_tool_uses,
     )
 
-    # Create CLI agent tools
-    claude_cli_tool = ClaudeCLITool(
-        permission_manager=permission_manager,
-        model=agent_model,  # Can override default Sonnet
-    )
-
-    codex_cli_tool = CodexCLITool(
-        permission_manager=permission_manager,
-        model=agent_model if agent_model and "gpt" in agent_model else None,
-    )
-
-    gemini_cli_tool = GeminiCLITool(
-        permission_manager=permission_manager,
-        model=agent_model if agent_model and "gemini" in agent_model else None,
-    )
-
-    grok_cli_tool = GrokCLITool(
-        permission_manager=permission_manager,
-        model=agent_model if agent_model and "grok" in agent_model else None,
-    )
-
     # Create auth management tool
     code_auth_tool = CodeAuthTool()
 
@@ -98,24 +87,19 @@ def register_agent_tools(
         default_mode="hybrid",  # Prefer local, fallback to cloud
     )
 
-    # Register tools
+    # Register core agent tools
     ToolRegistry.register_tool(mcp_server, agent_tool)
     ToolRegistry.register_tool(mcp_server, swarm_tool)
     ToolRegistry.register_tool(mcp_server, network_tool)
-    ToolRegistry.register_tool(mcp_server, claude_cli_tool)
-    ToolRegistry.register_tool(mcp_server, codex_cli_tool)
-    ToolRegistry.register_tool(mcp_server, gemini_cli_tool)
-    ToolRegistry.register_tool(mcp_server, grok_cli_tool)
     ToolRegistry.register_tool(mcp_server, code_auth_tool)
 
+    # Register all CLI tools (includes claude, codex, gemini, grok, etc.)
+    cli_tools = register_cli_tools(mcp_server, permission_manager)
+
     # Return list of registered tools
     return [
         agent_tool,
         swarm_tool,
         network_tool,
-        claude_cli_tool,
-        codex_cli_tool,
-        gemini_cli_tool,
-        grok_cli_tool,
         code_auth_tool,
-    ]
+    ] + cli_tools
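
The net effect of the agent/__init__.py changes is that per-CLI wiring (claude, codex, gemini, grok, plus the new openhands/oh, hanzo-dev, cline, and aider tools) now lives behind register_cli_tools, and register_agent_tools simply appends its return value. A rough caller-side sketch, assuming only the register_cli_tools(mcp_server, permission_manager) call shape shown in this diff; the PermissionManager construction is a placeholder:

from hanzo_mcp.tools.agent.cli_tools import register_cli_tools
from hanzo_mcp.tools.common.permissions import PermissionManager


def register_all_cli_tools(mcp_server):
    # Placeholder: PermissionManager's real constructor arguments are not shown here.
    permission_manager = PermissionManager()
    # One call registers every CLI-backed agent tool and returns the instances,
    # mirroring the call added inside register_agent_tools above.
    cli_tools = register_cli_tools(mcp_server, permission_manager)
    return cli_tools
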