hanzo-mcp 0.8.2__py3-none-any.whl → 0.8.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of hanzo-mcp may be problematic.
- hanzo_mcp/__init__.py +15 -2
- hanzo_mcp/bridge.py +133 -127
- hanzo_mcp/cli.py +45 -21
- hanzo_mcp/compute_nodes.py +68 -55
- hanzo_mcp/config/settings.py +11 -0
- hanzo_mcp/core/base_agent.py +520 -0
- hanzo_mcp/core/model_registry.py +436 -0
- hanzo_mcp/dev_server.py +3 -2
- hanzo_mcp/server.py +4 -1
- hanzo_mcp/tools/__init__.py +61 -46
- hanzo_mcp/tools/agent/__init__.py +63 -52
- hanzo_mcp/tools/agent/agent_tool.py +12 -1
- hanzo_mcp/tools/agent/cli_tools.py +543 -0
- hanzo_mcp/tools/agent/network_tool.py +11 -55
- hanzo_mcp/tools/agent/unified_cli_tools.py +259 -0
- hanzo_mcp/tools/common/batch_tool.py +2 -0
- hanzo_mcp/tools/common/context.py +3 -1
- hanzo_mcp/tools/config/config_tool.py +121 -9
- hanzo_mcp/tools/filesystem/__init__.py +18 -0
- hanzo_mcp/tools/llm/__init__.py +44 -16
- hanzo_mcp/tools/llm/llm_tool.py +13 -0
- hanzo_mcp/tools/llm/llm_unified.py +911 -0
- hanzo_mcp/tools/shell/__init__.py +7 -1
- hanzo_mcp/tools/shell/auto_background.py +24 -0
- hanzo_mcp/tools/shell/bash_tool.py +14 -28
- hanzo_mcp/tools/shell/zsh_tool.py +266 -0
- hanzo_mcp-0.8.4.dist-info/METADATA +411 -0
- {hanzo_mcp-0.8.2.dist-info → hanzo_mcp-0.8.4.dist-info}/RECORD +31 -25
- hanzo_mcp-0.8.2.dist-info/METADATA +0 -526
- {hanzo_mcp-0.8.2.dist-info → hanzo_mcp-0.8.4.dist-info}/WHEEL +0 -0
- {hanzo_mcp-0.8.2.dist-info → hanzo_mcp-0.8.4.dist-info}/entry_points.txt +0 -0
- {hanzo_mcp-0.8.2.dist-info → hanzo_mcp-0.8.4.dist-info}/top_level.txt +0 -0
hanzo_mcp/cli.py
CHANGED
@@ -1,4 +1,10 @@
-"""Command-line interface for the Hanzo AI server."""
+"""Command-line interface for the Hanzo AI server.
+
+This module intentionally defers heavy imports (like the server and its
+dependencies) until after we determine the transport and configure logging.
+This prevents any stdout/stderr noise from imports that would corrupt the
+MCP stdio transport used by Claude Desktop and other MCP clients.
+"""

 import os
 import sys
@@ -9,52 +15,62 @@ import argparse
 from typing import Any, cast
 from pathlib import Path

-from hanzo_mcp.server import HanzoMCPServer
-

 def main() -> None:
     """Run the CLI for the Hanzo AI server."""
-
-    # Pre-parse arguments to check transport type early
-    import sys
-
+    # Pre-parse arguments to check transport type early, BEFORE importing server
     early_parser = argparse.ArgumentParser(add_help=False)
     early_parser.add_argument("--transport", choices=["stdio", "sse"], default="stdio")
     early_args, _ = early_parser.parse_known_args()

     # Configure logging VERY early based on transport
+    suppress_stdout = False
+    original_stdout = sys.stdout
     if early_args.transport == "stdio":
-        # Set environment variable for server to detect stdio mode
-        import os
-
+        # Set environment variable for server to detect stdio mode as early as possible
         os.environ["HANZO_MCP_TRANSPORT"] = "stdio"
+        # Aggressively quiet common dependency loggers/warnings in stdio mode
+        os.environ.setdefault("PYTHONWARNINGS", "ignore")
+        os.environ.setdefault("LITELLM_LOG", "ERROR")
+        os.environ.setdefault("LITELLM_LOGGING_LEVEL", "ERROR")
+        os.environ.setdefault("FASTMCP_LOG_LEVEL", "ERROR")

-        #
-
+        # Suppress FastMCP logging (if available) and all standard logging
+        try:
+            from fastmcp.utilities.logging import configure_logging  # type: ignore

-
-
+            configure_logging(level="ERROR")
+        except Exception:
+            pass

-        # Also configure standard logging to ERROR level
         logging.basicConfig(
             level=logging.ERROR,  # Only show errors
             handlers=[],  # No handlers for stdio to prevent protocol corruption
         )

         # Redirect stderr to devnull for stdio transport to prevent any output
-        import sys
-
         sys.stderr = open(os.devnull, "w")

-
+        # Suppress stdout during potentially noisy imports unless user requested help/version
+        if not any(flag in sys.argv for flag in ("--version", "-h", "--help")):
+            sys.stdout = open(os.devnull, "w")
+            suppress_stdout = True
+
+    # Import the server only AFTER transport/logging have been configured to avoid import-time noise
+    from hanzo_mcp.server import HanzoMCPServer
+
+    # Avoid importing hanzo_mcp package just to get version (it can have side-effects).
+    try:
+        from importlib.metadata import version as _pkg_version  # py3.8+
+        _version = _pkg_version("hanzo-mcp")
+    except Exception:
+        _version = "unknown"

     parser = argparse.ArgumentParser(
         description="MCP server implementing Hanzo AI capabilities"
     )

-    parser.add_argument(
-        "--version", action="version", version=f"hanzo-mcp {__version__}"
-    )
+    parser.add_argument("--version", action="version", version=f"hanzo-mcp {_version}")

     _ = parser.add_argument(
         "--transport",
@@ -199,6 +215,14 @@ def main() -> None:

     args = parser.parse_args()

+    # Restore stdout after parsing, before any explicit output or server start
+    if suppress_stdout:
+        try:
+            sys.stdout.close()  # Close devnull handle
+        except Exception:
+            pass
+        sys.stdout = original_stdout
+
     # Cast args attributes to appropriate types to avoid 'Any' warnings
     name: str = cast(str, args.name)
     install: bool = cast(bool, args.install)
hanzo_mcp/compute_nodes.py
CHANGED
@@ -8,33 +8,39 @@ from typing import Any, Dict, List

 class ComputeNodeDetector:
     """Detect available compute nodes (GPUs, WebGPU, CPUs) for distributed work."""
-
+
     @staticmethod
     def detect_local_gpus() -> List[Dict[str, Any]]:
         """Detect local GPU devices."""
         gpus = []
-
+
         # Try NVIDIA GPUs
         try:
             result = subprocess.run(
-                [
+                [
+                    "nvidia-smi",
+                    "--query-gpu=name,memory.total",
+                    "--format=csv,noheader",
+                ],
                 capture_output=True,
                 text=True,
-                timeout=2
+                timeout=2,
             )
             if result.returncode == 0:
-                for line in result.stdout.strip().split(
+                for line in result.stdout.strip().split("\n"):
                     if line:
-                        name, memory = line.split(
-                        gpus.append(
-
-
-
-
-
+                        name, memory = line.split(", ")
+                        gpus.append(
+                            {
+                                "type": "cuda",
+                                "name": name,
+                                "memory": memory,
+                                "id": f"cuda:{len(gpus)}",
+                            }
+                        )
         except (FileNotFoundError, subprocess.TimeoutExpired):
             pass
-
+
         # Try Metal GPUs (macOS)
         if platform.system() == "Darwin":
             try:
@@ -43,96 +49,103 @@ class ComputeNodeDetector:
                     ["system_profiler", "SPDisplaysDataType"],
                     capture_output=True,
                     text=True,
-                    timeout=2
+                    timeout=2,
                 )
                 if result.returncode == 0 and "Metal" in result.stdout:
                     # Parse GPU info from system_profiler
-                    lines = result.stdout.split(
+                    lines = result.stdout.split("\n")
                     for i, line in enumerate(lines):
-                        if
-                        gpu_name = line.split(
-                        gpus.append(
-
-
-
-
-
+                        if "Chipset Model:" in line:
+                            gpu_name = line.split(":")[1].strip()
+                            gpus.append(
+                                {
+                                    "type": "metal",
+                                    "name": gpu_name,
+                                    "memory": "Shared",
+                                    "id": f"metal:{len(gpus)}",
+                                }
+                            )
             except (FileNotFoundError, subprocess.TimeoutExpired):
                 pass
-
+
         return gpus
-
+
     @staticmethod
     def detect_webgpu_nodes() -> List[Dict[str, Any]]:
         """Detect connected WebGPU nodes (from browsers)."""
         webgpu_nodes = []
-
+
         # Check for WebGPU connections (would need actual WebSocket/server to track)
         # For now, check if a WebGPU server is running
         webgpu_port = os.environ.get("HANZO_WEBGPU_PORT", "8765")
         try:
             import socket
+
             sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            result = sock.connect_ex((
+            result = sock.connect_ex(("localhost", int(webgpu_port)))
             sock.close()
             if result == 0:
-                webgpu_nodes.append(
-
-
-
-
-
+                webgpu_nodes.append(
+                    {
+                        "type": "webgpu",
+                        "name": "Chrome WebGPU",
+                        "memory": "Browser",
+                        "id": "webgpu:0",
+                    }
+                )
         except Exception:
             pass
-
+
         return webgpu_nodes
-
+
     @staticmethod
     def detect_cpu_nodes() -> List[Dict[str, Any]]:
         """Detect CPU compute nodes."""
         import multiprocessing
-
-        return [
-
-
-
-
-
-
+
+        return [
+            {
+                "type": "cpu",
+                "name": f"{platform.processor() or 'CPU'}",
+                "cores": multiprocessing.cpu_count(),
+                "id": "cpu:0",
+            }
+        ]
+
     @classmethod
     def get_all_nodes(cls) -> List[Dict[str, Any]]:
         """Get all available compute nodes."""
         nodes = []
-
+
         # Detect GPUs
         gpus = cls.detect_local_gpus()
         nodes.extend(gpus)
-
+
         # Detect WebGPU connections
         webgpu = cls.detect_webgpu_nodes()
         nodes.extend(webgpu)
-
+
         # If no GPUs/WebGPU, add CPU as compute node
         if not nodes:
             nodes.extend(cls.detect_cpu_nodes())
-
+
         return nodes
-
+
     @classmethod
     def get_node_count(cls) -> int:
         """Get total number of available compute nodes."""
         return len(cls.get_all_nodes())
-
+
     @classmethod
     def get_node_summary(cls) -> str:
         """Get a summary string of available nodes."""
         nodes = cls.get_all_nodes()
         if not nodes:
             return "No compute nodes available"
-
+
         count = len(nodes)
         node_word = "node" if count == 1 else "nodes"
-
+
         # Group by type
         types = {}
         for node in nodes:
@@ -140,7 +153,7 @@ class ComputeNodeDetector:
             if node_type not in types:
                 types[node_type] = 0
             types[node_type] += 1
-
+
         # Build summary
         parts = []
         for node_type, type_count in types.items():
@@ -152,7 +165,7 @@ class ComputeNodeDetector:
                 parts.append(f"{type_count} WebGPU")
             elif node_type == "cpu":
                 parts.append(f"{type_count} CPU")
-
+
         type_str = ", ".join(parts)
         return f"{count} {node_word} available ({type_str})"

@@ -161,7 +174,7 @@ def print_node_status():
     """Print current node status."""
     detector = ComputeNodeDetector()
     nodes = detector.get_all_nodes()
-
+
     print(f"\n🖥️ Compute Nodes: {len(nodes)}")
     for node in nodes:
         if node["type"] in ["cuda", "metal"]:
@@ -176,4 +189,4 @@ def print_node_status():
 if __name__ == "__main__":
     # Test the detector
     print_node_status()
-    print(ComputeNodeDetector.get_node_summary())
+    print(ComputeNodeDetector.get_node_summary())
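The detector is stateless (class/static methods returning plain dicts), so it can be exercised directly. A usage sketch follows, assuming hanzo-mcp 0.8.4 is installed and the module path matches the file list above; the actual output depends on the hardware the machine exposes.

from hanzo_mcp.compute_nodes import ComputeNodeDetector

# Each node is a plain dict, e.g. {"type": "cuda", "name": ..., "memory": ..., "id": "cuda:0"};
# CPU nodes carry "cores" instead of "memory".
nodes = ComputeNodeDetector.get_all_nodes()
for node in nodes:
    print(node["id"], node["type"], node.get("name"))

print(ComputeNodeDetector.get_node_count())    # total number of detected nodes
print(ComputeNodeDetector.get_node_summary())  # e.g. "1 node available (...)"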
hanzo_mcp/config/settings.py
CHANGED
@@ -514,6 +514,17 @@ def _merge_config(

     merged = deep_merge(base_dict, config_dict)

+    # Backwards/forwards compatibility: support a structured "tools" section
+    # where each tool can define { enabled: bool, ...options } and map it to
+    # the existing enabled_tools/disabled_tools layout.
+    tools_cfg = merged.get("tools", {})
+    if isinstance(tools_cfg, dict):
+        enabled_tools = dict(merged.get("enabled_tools", {}))
+        for tool_name, tool_data in tools_cfg.items():
+            if isinstance(tool_data, dict) and "enabled" in tool_data:
+                enabled_tools[tool_name] = bool(tool_data.get("enabled"))
+        merged["enabled_tools"] = enabled_tools
+
     # Reconstruct the settings object
     mcp_servers = {}
     for name, server_data in merged.get("mcp_servers", {}).items():